repo_name | file_path | content | extention |
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_mem_move_2.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
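// In this version both the thresholding of a + b (Kernel 1) and the final
// computation (Kernel 2) run on the device, so sum_buf stays on the device
// between the two kernels instead of making a round trip to the host.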
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) {
int t = a_acc[i] + b_acc[i];
if (t > 10)
sum_acc[i] = 1;
else
sum_acc[i] = 0;
});
});
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_loop.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#include <cstdio>
constexpr int N = 16;
constexpr int STEPS = 10000;
int main() {
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::vector<int> c(N);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
sycl::queue q;
sycl::buffer<int> a_buf(a);
sycl::buffer<int> b_buf(b);
for (int j = 0; j < STEPS; j++) {
//# Buffer c in the loop
sycl::buffer<int> c_buf(c);
q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor a_acc(a_buf, h);
sycl::accessor b_acc(b_buf, h);
sycl::accessor c_acc(c_buf, h, sycl::no_init);
h.parallel_for(N, [=](auto i) {
c_acc[i] = (a_acc[i] < b_acc[i]) ? -1 : 1;
a_acc[i] += c_acc[i];
b_acc[i] -= c_acc[i];
});
});
}
// Create host accessors.
const sycl::host_accessor ha(a_buf);
const sycl::host_accessor hb(b_buf);
printf("%d %d\n", ha[N / 2], hb[N / 2]);
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 16;
std::vector<int> host_data(N, 10);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# Modify data array on device
sycl::buffer buffer_data(host_data);
q.submit([&](sycl::handler& h) {
sycl::accessor device_data(buffer_data, h);
h.parallel_for(N, [=](auto i) { device_data[i] += 1; });
});
sycl::host_accessor ha(buffer_data, sycl::read_only);
//# print output
for (int i = 0; i < N; i++) std::cout << ha[i] << " ";
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_host_ptr.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int num_items = 16;
constexpr int iter = 1;
std::vector<int> a(num_items, 10);
std::vector<int> b(num_items, 10);
std::vector<int> sum(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer sum_buf(sum, props);
{
sycl::host_accessor a_host_acc(a_buf);
std::cout << "address of vector a = " << a.data() << "\n";
std::cout << "buffer memory address = " << a_host_acc.get_pointer() << "\n";
}
q.submit([&](auto &h) {
// Input accessors
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
// Output accessor
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
sycl::stream out(1024 * 1024, 1 * 128, h);
h.parallel_for(num_items, [=](auto i) {
if (i[0] == 0)
out << "device accessor address = " << a_acc.get_pointer() << "\n";
sum_acc[i] = a_acc[i] + b_acc[i];
});
}).wait();
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_mem_move_0.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
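// In this version the thresholding step runs on the host: a host_accessor
// forces sum_buf back to the host between Kernel 1 and Kernel 2, which adds
// two extra transfers of the sum data.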
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
{
sycl::host_accessor h_acc(sum_buf);
for (int j = 0; j < num_items; j++)
if (h_acc[j] > 10)
h_acc[j] = 1;
else
h_acc[j] = 0;
}
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_access_modes.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
constexpr int N = 1024000000;
int main() {
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::vector<int> c(N);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
sycl::queue q;
{
sycl::buffer<int> a_buf(a);
sycl::buffer<int> b_buf(b);
sycl::buffer<int> c_buf(c);
q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor a_acc(a_buf, h);
sycl::accessor b_acc(b_buf, h);
sycl::accessor c_acc(c_buf, h);
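// Default accessors are read_write; declaring intent explicitly lets the
// runtime skip unneeded copies, for example (a sketch of the same pattern
// used in the buffer_mem_move samples in this module):
//   sycl::accessor a_acc(a_buf, h, sycl::read_only);
//   sycl::accessor c_acc(c_buf, h, sycl::write_only, sycl::no_init);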
h.parallel_for(N, [=](auto i) {
c_acc[i] = a_acc[i] + b_acc[i];
});
});
}
std::cout << "C = " << c[N/2] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/usm_copy_partial.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#include <cstdlib>
static constexpr size_t N = 102400000; // global size
int main() {
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# setup queue with default selector
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize data array using usm
int *data = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) data[i] = 1;
//# USM device allocation
auto device_data = sycl::malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(device_data, data, sizeof(int) * N).wait();
//# single_task kernel performing simple addition of all elements
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i++){
sum += device_data[i];
}
device_data[0] = sum;
}).wait();
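//# Only data[0] is read after the copy-back below, so the device-to-host
//# transfer could presumably be trimmed to a single element, e.g.:
//#   q.memcpy(data, device_data, sizeof(int)).wait();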
//# copy mem from device to host
q.memcpy(data, device_data, sizeof(int) * N).wait();
std::cout << "Sum = " << data[0] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
sycl::free(device_data, q);
free(data);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/usm_overlap_copy.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#define NITERS 10
#define KERNEL_ITERS 10000
#define NUM_CHUNKS 10
#define CHUNK_SIZE 10000000
int main() {
const int num_chunks = NUM_CHUNKS;
const int chunk_size = CHUNK_SIZE;
const int iter = NITERS;
sycl::queue q;
//# Allocate and initialize host data
float *host_data[num_chunks];
for (int c = 0; c < num_chunks; c++) {
host_data[c] = sycl::malloc_host<float>(chunk_size, q);
float val = c;
for (int i = 0; i < chunk_size; i++)
host_data[c][i] = val;
}
std::cout << "Allocated host data\n";
//# Allocate and initialize device memory
float *device_data[num_chunks];
for (int c = 0; c < num_chunks; c++) {
device_data[c] = sycl::malloc_device<float>(chunk_size, q);
float val = 1000.0;
q.fill<float>(device_data[c], val, chunk_size);
}
q.wait();
std::cout << "Allocated device data\n";
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
for (int it = 0; it < iter; it++) {
for (int c = 0; c < num_chunks; c++) {
//# Copy-in not dependent on previous event
auto copy_in_event = q.memcpy(device_data[c], host_data[c], sizeof(float) * chunk_size);
//# Compute waits for copy_in_event
auto compute_event = q.parallel_for(chunk_size, copy_in_event, [=](auto id) {
for (int i = 0; i < KERNEL_ITERS; i++) device_data[c][id] += 1.0;
});
//# Copy out waits for compute_event
auto copy_out_event = q.memcpy(host_data[c], device_data[c], sizeof(float) * chunk_size, compute_event);
}
q.wait();
}
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
for (int c = 0; c < num_chunks; c++) {
for (int i = 0; i < chunk_size; i++) {
if (host_data[c][i] != (float)((c + KERNEL_ITERS * iter))) {
std::cout << "Mismatch for chunk: " << c << " position: " << i
<< " expected: " << c + 10000 << " got: " << host_data[c][i]
<< "\n";
break;
}
}
}
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/usm_shared.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# USM allocation using malloc_shared
constexpr int N = 16;
int *data = sycl::malloc_shared<int>(N, q);
//# Initialize data array
for (int i = 0; i < N; i++) data[i] = 10;
//# Modify data array on device
q.parallel_for(N, [=](auto i) { data[i] += 1; }).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
sycl::free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_mem_move_3.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
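// In this version the whole computation is fused into a single kernel, so no
// intermediate sum buffer is needed and no data moves between kernels.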
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) {
int t = a_acc[i] + b_acc[i];
if (t > 10)
res_acc[i] = c_acc[i] + d_acc[i] ;
else
res_acc[i] = d_acc[i];
});
}).wait();
sycl::host_accessor h_acc(res_buf);
for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/align.hpp | #ifndef __ALIGN
#define __ALIGN 1
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
enum class Alignment : size_t {
Normal = sizeof(void *),
SSE = 16,
AVX = 32,
PAGE = 4096,
};
namespace detail {
void *allocate_aligned_memory(size_t align, size_t size);
void deallocate_aligned_memory(void *ptr) noexcept;
} // namespace detail
template <typename T, Alignment Align = Alignment::PAGE> class AlignedAllocator;
template <Alignment Align> class AlignedAllocator<void, Align> {
public:
typedef void *pointer;
typedef const void *const_pointer;
typedef void value_type;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
};
template <typename T, Alignment Align> class AlignedAllocator {
public:
typedef T value_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::true_type propagate_on_container_move_assignment;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
public:
AlignedAllocator() noexcept {}
template <class U>
AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
size_type max_size() const noexcept {
return (size_type(~0) - size_type(Align)) / sizeof(T);
}
pointer address(reference x) const noexcept { return std::addressof(x); }
const_pointer address(const_reference x) const noexcept {
return std::addressof(x);
}
pointer allocate(size_type n,
typename AlignedAllocator<void, Align>::const_pointer = 0) {
const size_type alignment = static_cast<size_type>(Align);
void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T));
if (ptr == nullptr) {
throw std::bad_alloc();
}
return reinterpret_cast<pointer>(ptr);
}
void deallocate(pointer p, size_type) noexcept {
return detail::deallocate_aligned_memory(p);
}
template <class U, class... Args> void construct(U *p, Args &&... args) {
::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
}
void destroy(pointer p) { p->~T(); }
};
template <typename T, Alignment Align> class AlignedAllocator<const T, Align> {
public:
typedef T value_type;
typedef const T *pointer;
typedef const T *const_pointer;
typedef const T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::true_type propagate_on_container_move_assignment;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
public:
AlignedAllocator() noexcept {}
template <class U>
AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
size_type max_size() const noexcept {
return (size_type(~0) - size_type(Align)) / sizeof(T);
}
const_pointer address(const_reference x) const noexcept {
return std::addressof(x);
}
pointer allocate(size_type n,
typename AlignedAllocator<void, Align>::const_pointer = 0) {
const size_type alignment = static_cast<size_type>(Align);
void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T));
if (ptr == nullptr) {
throw std::bad_alloc();
}
return reinterpret_cast<pointer>(ptr);
}
void deallocate(pointer p, size_type) noexcept {
return detail::deallocate_aligned_memory(p);
}
template <class U, class... Args> void construct(U *p, Args &&... args) {
::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
}
void destroy(pointer p) { p->~T(); }
};
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator==(const AlignedAllocator<T, TAlign> &,
const AlignedAllocator<U, UAlign> &) noexcept {
return TAlign == UAlign;
}
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator!=(const AlignedAllocator<T, TAlign> &,
const AlignedAllocator<U, UAlign> &) noexcept {
return TAlign != UAlign;
}
void *detail::allocate_aligned_memory(size_t align, size_t size) {
assert(align >= sizeof(void *));
// assert(nail::is_power_of_two(align));
if (size == 0) {
return nullptr;
}
void *ptr = nullptr;
int rc = posix_memalign(&ptr, align, size);
if (rc != 0) {
return nullptr;
}
return ptr;
}
void detail::deallocate_aligned_memory(void *ptr) noexcept { return free(ptr); }
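// Example usage (a sketch): request page-aligned storage for a std::vector,
// e.g. std::vector<float, AlignedAllocator<float, Alignment::PAGE>> v(n);
// v.data() is then 4096-byte aligned.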
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/ParallelPatterns/PrefixSum/src/PrefixSum.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
//
// PrefixSum: this code sample implements the inclusive scan (prefix sum) in
// parallel. That is, given a randomized sequence of numbers x0, x1, x2, ...,
// xn, this algorithm computes and returns a new sequence y0, y1, y2, ..., yn so
// that
//
// y0 = x0
// y1 = x0 + x1
// y2 = x0 + x1 + x2
// .....
// yn = x0 + x1 + x2 + ... + xn
//
// Below is the pseudo code for computing prefix sum in parallel:
//
// n is a power of 2 (1, 2, 4, 8, 16, ...):
//
// for i from 0 to [log2 n] - 1 do
//   for j from 0 to (n-1) do in parallel
//     if j < 2^i then
//       x_{j}^{i+1} <- x_{j}^{i}
//     else
//       x_{j}^{i+1} <- x_{j}^{i} + x_{j-2^{i}}^{i}
//
// In the above, the notation x_{j}^{i} means the value of the jth element of
// array x in timestep i. Given n processors to perform each iteration of the
// inner loop in constant time, the algorithm as a whole runs in O(log n) time,
// the number of iterations of the outer loop.
//
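// For example, with n = 8 and x = [1, 2, 3, 4, 5, 6, 7, 8], the three
// iterations (2^i = 1, 2, 4) produce
//
//   i = 0: [1, 3, 5, 7, 9, 11, 13, 15]
//   i = 1: [1, 3, 6, 10, 14, 18, 22, 26]
//   i = 2: [1, 3, 6, 10, 15, 21, 28, 36]
//
// which is the inclusive prefix sum of x.
//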
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <string>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace sycl;
using namespace std;
void Show(int a[], int arraysize) {
for (int i = 0; i < arraysize; ++i) {
cout << a[i] << " ";
if ((i % 16) == 15) cout << "\n";
}
cout << "\n";
return;
}
int* ParallelPrefixSum(int* current, int* next, unsigned int nb, queue& q) {
unsigned int two_power = 1;
unsigned int num_iter = log2(nb);
// unsigned int uintmax = UINT_MAX;
int* result = NULL;
// cout << "uintmax " << uintmax << " " << log2(uintmax) << "\n";
// Buffer scope
{
buffer sequence_buf(current, range(nb));
buffer sequence_next_buf(next, range(nb));
// Iterate over the necessary iterations.
for (unsigned int iter = 0; iter < num_iter; iter++, two_power *= 2) {
// Submit command group for execution
q.submit([&](auto& h) {
// Create accessors
accessor sequence(sequence_buf, h);
accessor sequence_next(sequence_next_buf, h);
if (iter % 2 == 0) {
h.parallel_for(nb, [=](id<1> j) {
if (j < two_power) {
sequence_next[j] = sequence[j];
} else {
sequence_next[j] = sequence[j] + sequence[j - two_power];
}
}); // end parallel for loop in kernel
result = next;
} else {
h.parallel_for(nb, [=](id<1> j) {
if (j < two_power) {
sequence[j] = sequence_next[j];
} else {
sequence[j] = sequence_next[j] + sequence_next[j - two_power];
}
}); // end parallel for loop in kernel
result = current;
}
}); // end device queue
} // end iteration
} // Buffer scope
// Wait for commands to complete. Enforce synchronization on the command queue
q.wait_and_throw();
return result;
}
/*
void PrefixSum(int* x, unsigned int nb)
{
unsigned int two_power = 1;
unsigned int num_iter = log2(nb);
int temp = 0;
// Iterate over the necessary iterations
for (unsigned int iter = 0; iter < num_iter; iter++, two_power*=2) {
//Show(x, nb);
// cout << "two_power: " << two_power << "\n";
for (unsigned int j = nb; j > 0; j--) {
if (j < two_power) {
x[j] = x[j];
}
else {
x[j] = x[j] + x[j - two_power];
}
}
}
}
*/
void Usage(string prog_name, int exponent) {
cout << " Incorrect parameters\n";
cout << " Usage: " << prog_name << " n k \n\n";
cout << " n: Integer exponent presenting the size of the input array.\n";
cout << " The number of element in the array must be power of 2\n";
cout << " (e.g., 1, 2, 4, ...). Please enter the corresponding exponent\n";
cout << " betwwen 0 and " << exponent - 1 << ".\n";
cout << " k: Seed used to generate a random sequence.\n";
}
int main(int argc, char* argv[]) {
unsigned int nb, seed;
int n, exp_max = log2(numeric_limits<int>::max());
// Read parameters.
try {
n = stoi(argv[1]);
// Verify the boundary of acceptance.
if (n < 0 || n >= exp_max) {
Usage(argv[0], exp_max);
return -1;
}
seed = stoi(argv[2]);
nb = pow(2, n);
} catch (...) {
Usage(argv[0], exp_max);
return -1;
}
cout << "\nSequence size: " << nb << ", seed: " << seed;
int num_iter = log2(nb);
cout << "\nNum iteration: " << num_iter << "\n";
// Create a device queue using SYCL class queue
queue q(default_selector_v);
cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n";
int* data = new int[nb];
int* prefix_sum1 = new int[nb];
int* prefix_sum2 = new int[nb];
int* result = NULL;
srand(seed);
// Initialize data arrays
for (int i = 0; i < nb; i++) {
data[i] = prefix_sum1[i] = rand() % 10;
prefix_sum2[i] = 0;
}
// Start timer
dpc_common::TimeInterval t;
result = ParallelPrefixSum(prefix_sum1, prefix_sum2, nb, q);
auto elapsed_time = t.Elapsed();
cout << "Elapsed time: " << elapsed_time << " s\n";
// cout << "\ndata after transforming using parallel prefix sum result:";
// Show(result, nb);
bool equal = true;
if (result[0] != data[0])
equal = false;
else {
for (int i = 1; i < nb; i++) {
if (result[i] != result[i - 1] + data[i]) {
equal = false;
break;
}
}
}
delete[] data;
delete[] prefix_sum1;
delete[] prefix_sum2;
if (!equal) {
cout << "\nFailed: " << std::endl;
return -2;
} else {
cout << "\nSuccess!" << std::endl;
return 0;
}
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/ParallelPatterns/loop-unroll/src/loop-unroll.cpp | //==============================================================
// This sample demonstrates the use of loop unrolling as a simple optimization
// technique to speed up compute and increase memory access throughput.
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
#include <iostream>
#include <vector>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace std;
using namespace sycl;
template <int unroll_factor>
class VAdd;
// Adds corresponding elements of two input vectors using a loop. The loop is
// unrolled as many times as specified by the unroll factor.
template <int unroll_factor>
void VectorAdd(queue &q, const vector<float> &a, const vector<float> &b,
vector<float> &sum) {
size_t n = a.size();
buffer buffer_a(a);
buffer buffer_b(b);
buffer buffer_sum(sum);
event e = q.submit([&](handler &h) {
accessor acc_a(buffer_a, h, read_only);
accessor acc_b(buffer_b, h, read_only);
accessor acc_sum(buffer_sum, h, write_only, no_init);
h.single_task<VAdd<unroll_factor>>([=]()[[intel::kernel_args_restrict]] {
// Unroll loop as specified by the unroll factor.
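// For example, with unroll_factor == 4 the compiler conceptually rewrites
// the loop so each iteration performs four independent additions
// (i, i+1, i+2, i+3), reducing loop-control overhead and exposing more
// memory-level parallelism.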
#pragma unroll unroll_factor
for (size_t i = 0; i < n; i++) {
acc_sum[i] = acc_a[i] + acc_b[i];
}
});
});
double start = e.get_profiling_info<info::event_profiling::command_start>();
double end = e.get_profiling_info<info::event_profiling::command_end>();
// Convert from nanoseconds to milliseconds.
double kernel_time = (end - start) * 1e-6;
cout << "Unroll factor: " << unroll_factor << " Kernel time: " << kernel_time
<< " ms\n";
cout << "Throughput for kernel with unroll factor " << unroll_factor << ": ";
cout << std::fixed << std::setprecision(3) << ((double)n / kernel_time) / 1e6f
<< " GFlops\n";
}
// Initialize vector.
void InitializeVector(vector<float> &a) {
size_t n = a.size();
for (size_t i = 0; i < n; i++) {
a[i] = i;
}
}
// Verify results.
void VerifyResults(const vector<float> &a, const vector<float> &b,
const vector<float> &sum) {
size_t n = a.size();
for (size_t i = 0; i < n; i++) {
if (sum[i] != a[i] + b[i]) {
cout << "FAILED: The results are incorrect.\n";
exit(1);
}
}
}
int main() {
constexpr size_t n = 1 << 25;
cout << "Input array size: " << n << "\n";
// Input vectors.
vector<float> a(n);
vector<float> b(n);
// Output vector.
vector<float> sum(n);
try {
queue q(default_selector_v,
property::queue::enable_profiling{});
cout << "Running on device: "
<< q.get_device().get_info<info::device::name>() << "\n";
// Instantiate VectorAdd kernel with different unroll factors: 1, 2, 4,
// 8, 16. The VectorAdd kernel contains a loop that adds corresponding
// elements of two input vectors. That loop is unrolled by the specified
// unroll factor.
VectorAdd<1>(q, a, b, sum);
VerifyResults(a, b, sum);
VectorAdd<2>(q, a, b, sum);
VerifyResults(a, b, sum);
VectorAdd<4>(q, a, b, sum);
VerifyResults(a, b, sum);
VectorAdd<8>(q, a, b, sum);
VerifyResults(a, b, sum);
VectorAdd<16>(q, a, b, sum);
VerifyResults(a, b, sum);
} catch (sycl::exception const &e) {
cerr << "SYCL host exception:\n" << e.what() << "\n";
terminate();
}
cout << "PASSED: The results are correct.\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/ParallelPatterns/dpc_reduce/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// The include folder is located at %ONEAPI_ROOT%\dev-utilities\latest\include
// on your development system.
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <mpi.h>
#include <sycl/sycl.hpp>
#include <iomanip> // setprecision library
#include <iostream>
#include <numeric>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace sycl;
constexpr int master = 0;
// cpu_seq is a simple sequential CPU routine
// that calculates all the slices and then
// does a reduction.
float calc_pi_cpu_seq(int num_steps) {
float step = 1.0 / (float)num_steps;
float x;
float sum = 0.0;
for (int i = 1; i < num_steps; i++) {
x = (i - 0.5) * step;
sum = sum + 4.0 / (1.0 + x * x);
}
return sum / (float)num_steps;
}
// cpu_tbb is a simple parallel_reduce tbb routine
// that calculates all the slices and then
// uses tbb reduce to combine results.
float calc_pi_cpu_tbb(int num_steps) {
float step = 1.0 / (float)num_steps;
auto tbbtotal =
tbb::parallel_reduce(tbb::blocked_range<int>(1, num_steps), 0.0,
[=](tbb::blocked_range<int> r, float running_total) {
float y;
for (int i = r.begin(); i != r.end(); i++) {
y = (i - 0.5) * step;
running_total += 4.0 / (1.0 + y * y);
}
return running_total;
},
std::plus<float>());
return tbbtotal / (float)num_steps;
}
// onedpl_native uses a parallel_for to fill
// a buffer with all the slice calculations and
// then uses a single_task to combine all the results
// This is not the highest performing example but shows
// how to do calculations directly in SYCL with
// minimal complexity.
template <typename Policy>
float calc_pi_onedpl_native(size_t num_steps, Policy&& policy) {
float data[num_steps];
// Create buffer using host allocated "data" array
buffer<float, 1> buf{data, range<1>{num_steps}};
policy.queue().submit([&](handler& h) {
accessor writeresult(buf,h,write_only);
h.parallel_for(range<1>{num_steps}, [=](id<1> idx) {
float x = ((float)idx[0] - 0.5) / (float)num_steps;
writeresult[idx[0]] = 4.0f / (1.0 + x * x);
});
});
policy.queue().wait();
// A single_task is needed here to make sure
// the data is not overwritten while it is summed.
policy.queue().submit([&](handler& h) {
accessor a(buf,h);
h.single_task([=]() {
for (int i = 1; i < num_steps; i++) a[0] += a[i];
});
});
policy.queue().wait();
// float mynewresult = buf.get_access<access::mode::read>()[0] / (float)num_steps;
host_accessor answer(buf,read_only) ;
float mynewresult = answer[0]/(float)num_steps;
return mynewresult;
}
// This option uses a parallel_for to fill the array, then uses single_task
// to reduce into groups, and finally uses the CPU for the final reduction.
template <typename Policy>
float calc_pi_onedpl_native2(size_t num_steps, Policy&& policy, int group_size) {
float data[num_steps];
// Create buffer using host allocated "data" array
buffer<float, 1> buf{data, range<1>{num_steps}};
// fill buffer with calculations
policy.queue().submit([&](handler& h) {
accessor writeresult(buf, h, write_only);
h.parallel_for(range<1>{num_steps}, [=](id<1> idx) {
float x = ((float)idx[0] - 0.5) / (float)num_steps;
writeresult[idx[0]] = 4.0f / (1.0 + x * x);
});
});
policy.queue().wait();
size_t num_groups = num_steps / group_size;
float c[num_groups];
// create a number of groups and do a local reduction
// within these groups using single_task. Store each
// result within the output of bufc
for (int i = 0; i < num_groups; i++) c[i] = 0;
buffer<float, 1> bufc{c, range<1>{num_groups}};
for (int j = 0; j < num_groups; j++) {
policy.queue().submit([&](handler& h) {
accessor my_a(buf,h,read_only);
accessor my_c(bufc,h,write_only);
h.single_task([=]() {
for (int i = 0 + group_size * j; i < group_size + group_size * j; i++)
my_c[j] += my_a[i];
});
});
}
policy.queue().wait();
host_accessor src(bufc,read_only);
// Sum up results on CPU
float mynewresult = 0.0;
for (int i = 0; i < num_groups; i++) mynewresult += src[i];
return mynewresult / (float)num_steps;
}
// Function operator used as transform operation in transform-reduce operations
// implemented below.
struct my_no_op {
template <typename Tp>
Tp&& operator()(Tp&& a) const {
return std::forward<Tp>(a);
}
};
// Structure slice area performs the calculations for
// each rectangle that will be summed up.
struct slice_area {
int num;
slice_area(int num_steps) { num = num_steps; }
template <typename T>
float operator()(T&& i) const {
float x = ((float)i - 0.5) / (float)num;
return 4.0f / (1.0f + (x * x));
};
};
// a way to get value_type from both accessors and USM that is needed for transform_init
template <typename Unknown>
struct accessor_traits_impl
{
};
template <typename T, int Dim>
struct accessor_traits_impl<sycl::local_accessor<T, Dim>>
{
using value_type = typename sycl::local_accessor<T, Dim>::value_type;
};
template <typename T, int Dim, sycl::access::mode AccMode, sycl::access::target AccTarget,
sycl::access::placeholder Placeholder>
struct accessor_traits_impl<sycl::accessor<T, Dim, AccMode, AccTarget, Placeholder>>
{
using value_type = typename sycl::accessor<T, Dim, AccMode, AccTarget, Placeholder>::value_type;
};
template <typename RawArrayValueType>
struct accessor_traits_impl<RawArrayValueType*>
{
using value_type = RawArrayValueType;
};
template <typename Unknown>
using accessor_traits = accessor_traits_impl<typename std::decay<Unknown>::type>;
// calculate shift where we should start processing on current item
template <typename NDItemId, typename GlobalIdx, typename SizeNIter, typename SizeN>
SizeN
calc_shift(const NDItemId item_id, const GlobalIdx global_idx, SizeNIter& n_iter, const SizeN n)
{
auto global_range_size = item_id.get_global_range().size();
auto start = n_iter * global_idx;
auto global_shift = global_idx + n_iter * global_range_size;
if (n_iter > 0 && global_shift > n)
{
start += n % global_range_size - global_idx;
}
else if (global_shift < n)
{
n_iter++;
}
return start;
}
template <typename ExecutionPolicy, typename Operation1, typename Operation2>
struct transform_init
{
Operation1 binary_op;
Operation2 unary_op;
template <typename NDItemId, typename GlobalIdx, typename Size, typename AccLocal, typename... Acc>
void
operator()(const NDItemId item_id, const GlobalIdx global_idx, Size n, AccLocal& local_mem,
const Acc&... acc) const
{
auto local_idx = item_id.get_local_id(0);
auto global_range_size = item_id.get_global_range().size();
auto n_iter = n / global_range_size;
auto start = calc_shift(item_id, global_idx, n_iter, n);
auto shifted_global_idx = global_idx + start;
typename accessor_traits<AccLocal>::value_type res;
if (global_idx < n)
{
res = unary_op(shifted_global_idx, acc...);
}
// Add neighbour to the current local_mem
for (decltype(n_iter) i = 1; i < n_iter; ++i)
{
res = binary_op(res, unary_op(shifted_global_idx + i, acc...));
}
if (global_idx < n)
{
local_mem[local_idx] = res;
}
}
};
// Reduce on local memory
template <typename ExecutionPolicy, typename BinaryOperation1, typename Tp>
struct reduce
{
BinaryOperation1 bin_op1;
template <typename NDItemId, typename GlobalIdx, typename Size, typename AccLocal>
Tp
operator()(const NDItemId item_id, const GlobalIdx global_idx, const Size n, AccLocal& local_mem) const
{
auto local_idx = item_id.get_local_id(0);
auto group_size = item_id.get_local_range().size();
auto k = 1;
do
{
item_id.barrier(sycl::access::fence_space::local_space);
if (local_idx % (2 * k) == 0 && local_idx + k < group_size && global_idx < n &&
global_idx + k < n)
{
local_mem[local_idx] = bin_op1(local_mem[local_idx], local_mem[local_idx + k]);
}
k *= 2;
} while (k < group_size);
return local_mem[local_idx];
}
};
// walk through the data
template <typename ExecutionPolicy, typename F>
struct walk_n
{
F f;
template <typename ItemId, typename... Ranges>
auto
operator()(const ItemId idx, Ranges&&... rngs) const -> decltype(f(rngs[idx]...))
{
return f(rngs[idx]...);
}
};
// This option uses a parallel_for to fill the buffer, then
// uses a transform_init with plus/no_op, and then
// a local reduction followed by a global reduction.
template <typename Policy>
float calc_pi_onedpl_native3(size_t num_steps, int groups, Policy&& policy) {
float data[num_steps];
// Create buffer using host allocated "data" array
buffer<float, 1> buf{data, range<1>{num_steps}};
// fill the buffer with the calculation using parallel for
policy.queue().submit([&](handler& h) {
accessor writeresult(buf,h,write_only);
h.parallel_for(range<1>{num_steps}, [=](id<1> idx) {
float x = (float)idx[0] / (float)num_steps;
writeresult[idx[0]] = 4.0f / (1.0f + x * x);
});
});
policy.queue().wait();
using Functor = walk_n<Policy, my_no_op>;
// Functor will do nothing for transform_init and will use plus for reduce.
// In this example we have done the calculation and filled the buffer above
// The way transform_init works is that you need to have the value already
// populated in the buffer.
auto tf_init = transform_init<Policy, std::plus<float>,
Functor>{std::plus<float>(), Functor{my_no_op()}};
auto combine = std::plus<float>();
auto brick_reduce = reduce<Policy, std::plus<float>, float>{
std::plus<float>()};
auto workgroup_size =
policy.queue()
.get_device()
.template get_info<info::device::max_work_group_size>();
auto max_comp_u = policy.queue()
.get_device()
.template get_info<info::device::max_compute_units>();
auto n_groups = (num_steps - 1) / workgroup_size + 1;
n_groups =
std::min(decltype(n_groups)(max_comp_u),
n_groups); // make groups max number of compute units or less
// 0. Create temporary global buffer to store temporary value
auto temp_buf = buffer<float, 1>(range<1>(n_groups));
// 1. Reduce over each work_group
auto local_reduce_event =
policy.queue().submit([&buf, &temp_buf, &brick_reduce, &tf_init,
num_steps, n_groups, workgroup_size](handler& h) {
accessor access_buf(buf,h);
accessor temp_acc(temp_buf,h,write_only);
// Create temporary local buffer
local_accessor<float, 1>
temp_buf_local(range<1>(workgroup_size), h);
h.parallel_for(nd_range<1>(range<1>(n_groups * workgroup_size),
range<1>(workgroup_size)),
[=](nd_item<1> item_id) {
auto global_idx = item_id.get_global_id(0);
// 1. Initialization (transform part).
tf_init(item_id, global_idx, num_steps,
temp_buf_local, access_buf);
// 2. Reduce within work group
float local_result = brick_reduce(
item_id, global_idx, num_steps, temp_buf_local);
if (item_id.get_local_id(0) == 0) {
temp_acc[item_id.get_group(0)] = local_result;
}
});
});
// 2. global reduction
auto reduce_event = local_reduce_event;
if (n_groups > 1) {
auto countby2 = decltype(n_groups)(1);
do {
reduce_event = policy.queue().submit([&reduce_event, &temp_buf, &combine,
countby2, n_groups](handler& h) {
h.depends_on(reduce_event);
accessor temp_acc(temp_buf,h);
h.parallel_for(range<1>(n_groups), [=](item<1> item_id) {
auto global_idx = item_id.get_linear_id();
if (global_idx % (2 * countby2) == 0 &&
global_idx + countby2 < n_groups) {
temp_acc[global_idx] =
combine(temp_acc[global_idx], temp_acc[global_idx + countby2]);
}
});
});
countby2 *= 2;
} while (countby2 < n_groups);
}
host_accessor answer(temp_buf,read_only) ;
return answer[0]/(float)num_steps;
}
// onedpl_native4 fills a buffer with the numbers 0...num_steps-1 and then
// calls transform_init to calculate the slices and then
// does a reduction in two steps - local and then global.
template <typename Policy>
float calc_pi_onedpl_native4(size_t num_steps, int groups, Policy&& policy) {
std::vector<float> data(num_steps);
buffer<float, 1> buf2{data.data(), range<1>{num_steps}};
// fill buffer with 1...num_steps
policy.queue().submit([&](handler& h) {
accessor writeresult(buf2,h);
h.parallel_for(range<1>{num_steps},
[=](id<1> idx) { writeresult[idx[0]] = (float)idx[0]; });
});
policy.queue().wait();
using Functor2 = walk_n<Policy, slice_area>;
// The buffer now holds 0...num_steps-1 and we will use that as input
// to the slice_area structure which will calculate the area of each
// rectangle.
auto tf_init = transform_init<Policy, std::plus<float>,
Functor2>{
std::plus<float>(), Functor2{slice_area(num_steps)}};
auto combine = std::plus<float>();
auto brick_reduce = reduce<Policy, std::plus<float>, float>{
std::plus<float>()};
// get workgroup_size from the device
auto workgroup_size =
policy.queue()
.get_device()
.template get_info<info::device::max_work_group_size>();
// get number of compute units from device.
auto max_comp_u = policy.queue()
.get_device()
.template get_info<info::device::max_compute_units>();
auto n_groups = (num_steps - 1) / workgroup_size + 1;
// use the smaller of the number of workgroups device has or the
// number of steps/workgroups
n_groups = std::min(decltype(n_groups)(max_comp_u), n_groups);
// Create temporary global buffer to store temporary value
auto temp_buf = buffer<float, 1>(range<1>(n_groups));
// Reduce over each work_group
auto local_reduce_event =
policy.queue().submit([&buf2, &temp_buf, &brick_reduce, &tf_init,
num_steps, n_groups, workgroup_size](handler& h) {
// grab access to the previous input
accessor access_buf(buf2,h);
accessor temp_acc(temp_buf,h,write_only);
// Create temporary local buffer
local_accessor<float, 1>
temp_buf_local(range<1>(workgroup_size), h);
h.parallel_for(nd_range<1>(range<1>(n_groups * workgroup_size),
range<1>(workgroup_size)),
[=](nd_item<1> item_id) {
auto global_idx = item_id.get_global_id(0);
// 1. Initialization (transform part). Fill local
// memory
tf_init(item_id, global_idx, num_steps,
temp_buf_local, access_buf);
// 2. Reduce within work group
float local_result = brick_reduce(
item_id, global_idx, num_steps, temp_buf_local);
if (item_id.get_local_id(0) == 0) {
temp_acc[item_id.get_group(0)] = local_result;
}
});
});
// global reduction
auto reduce_event = local_reduce_event;
if (n_groups > 1) {
auto countby2 = decltype(n_groups)(1);
do {
reduce_event = policy.queue().submit([&reduce_event, &temp_buf, &combine,
countby2, n_groups](handler& h) {
h.depends_on(reduce_event);
accessor temp_acc(temp_buf,h);
h.parallel_for(range<1>(n_groups), [=](item<1> item_id) {
auto global_idx = item_id.get_linear_id();
if (global_idx % (2 * countby2) == 0 &&
global_idx + countby2 < n_groups) {
temp_acc[global_idx] =
combine(temp_acc[global_idx], temp_acc[global_idx + countby2]);
}
});
});
countby2 *= 2;
} while (countby2 < n_groups);
}
host_accessor answer(temp_buf,read_only) ;
return answer[0]/(float)num_steps;
}
// This function shows the use of two different oneAPI DPC++ Library calls.
// The first is a transform call which fills a buffer with the
// calculations for each small rectangle. The second call is the reduce
// call which sums up the results of all the elements in the buffer.
template <typename Policy>
float calc_pi_onedpl_two_steps_lib(int num_steps, Policy&& policy) {
buffer<float> calc_values{num_steps};
auto calc_begin2 = oneapi::dpl::begin(calc_values);
auto calc_end2 = oneapi::dpl::end(calc_values);
// use oneAPI DPC++ Library call transform to fill the buffer with
// the area calculations for each rectangle.
std::transform(policy, oneapi::dpl::counting_iterator<int>(1),
oneapi::dpl::counting_iterator<int>(num_steps), calc_begin2,
[=](int i) {
float x = (((float)i - 0.5f) / (float)(num_steps));
return (4.0f / (1.0f + x * x));
});
policy.queue().wait();
// use the oneAPI DPC++ Library call to reduce the array using plus
float result =
std::reduce(policy, calc_begin2, calc_end2, 0.0f, std::plus<float>());
policy.queue().wait();
result = result / (float)num_steps;
return result;
}
// This function uses the oneAPI DPC++ Library call
// transform reduce. It does everything in one library
// call.
template <typename Policy>
float calc_pi_onedpl_onestep(int num_steps, Policy& policy) {
float step = 1.0f / (float)num_steps;
float total = std::transform_reduce(
policy, oneapi::dpl::counting_iterator<int>(1),
oneapi::dpl::counting_iterator<int>(num_steps), 0.0f, std::plus<float>(),
[=](int i) {
float x = (float)(((float)i - 0.5f) / (float(num_steps)));
return (4.0f / (1.0f + x * x));
});
total = total * (float)step;
return total;
}
////////////////////////////////////////////////////////////////////////
//
// Each MPI rank computes part of the number Pi on the target device using SYCL.
// The partial result of number Pi is returned in "results".
//
////////////////////////////////////////////////////////////////////////
void mpi_native(float* results, int rank_num, int num_procs,
long total_num_steps, queue& q) {
float dx, dx2;
dx = 1.0f / (float)total_num_steps;
dx2 = dx / 2.0f;
// exception handler
//
// The exception_list parameter is an iterable list of std::exception_ptr
// objects. But those pointers are not always directly readable. So, we
// rethrow the pointer, catch it, and then we have the exception itself.
// Note: depending upon the operation there may be several exceptions.
// auto exception_handler = [&](exception_list exceptionList) {
// for (std::exception_ptr const& e : exceptionList) {
// try {
// std::rethrow_exception(e);
// } catch (sycl::exception const& e) {
// std::cout << "Failure"
// << "\n";
// std::terminate();
// }
// }
// };
try {
// The size of the memory that will be given to the buffer.
range<1> num_items{total_num_steps / size_t(num_procs)};
// Buffers are used to tell SYCL which data will be shared between the host
// and the devices.
buffer<float, 1> results_buf(results,
range<1>(total_num_steps / size_t(num_procs)));
// Submit takes in a lambda that is passed in a command group handler
// constructed at runtime.
q.submit([&](handler& h) {
// Accessors are used to get access to the memory owned by the buffers.
accessor results_accessor(results_buf,h,write_only);
// Each kernel calculates a partial of the number Pi in parallel.
h.parallel_for(num_items, [=](id<1> k) {
float x = ((float)rank_num / (float)num_procs) + (float)k * dx + dx2;
results_accessor[k] = (4.0f * dx) / (1.0f + x * x);
});
});
} catch (...) {
std::cout << "Failure" << std::endl;
}
}
// This function uses the oneAPI DPC++ Library call transform reduce.
// It does everything in one library call.
template <typename Policy>
float mpi_onedpl_onestep(int id, int num_procs, long total_num_steps,
Policy& policy) {
int num_step_per_rank = total_num_steps / num_procs;
float step = 1.0f / (float)total_num_steps;
float total = std::transform_reduce(
policy, oneapi::dpl::counting_iterator<int>(1),
oneapi::dpl::counting_iterator<int>(num_step_per_rank), 0.0f,
std::plus<float>(), [=](int i) {
float x = ((float)id / (float)num_procs) + i * step - step / 2;
return (4.0f / (1.0f + x * x));
});
total = total * (float)step;
return total;
}
int main(int argc, char** argv) {
int num_steps = 1000000;
int groups = 10000;
char machine_name[MPI_MAX_PROCESSOR_NAME];
int name_len=0;
int id=0;
int num_procs=0;
float pi=0.0;
queue myQueue{property::queue::in_order()};
auto policy = oneapi::dpl::execution::make_device_policy(
queue(default_selector_v));
// Start MPI.
if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
std::cout << "Failed to initialize MPI\n";
exit(-1);
}
// Create the communicator, and retrieve the number of MPI ranks.
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
// Determine the rank number.
MPI_Comm_rank(MPI_COMM_WORLD, &id);
// Get the machine name.
MPI_Get_processor_name(machine_name, &name_len);
std::cout << "Rank #" << id << " runs on: " << machine_name
<< ", uses device: "
<< myQueue.get_device().get_info<info::device::name>() << "\n";
if (id == master) {
printf("Number of steps is %d\n", num_steps);
pi = calc_pi_onedpl_native(num_steps, policy);
pi = calc_pi_onedpl_native2(num_steps, policy, groups);
pi = calc_pi_onedpl_native3(num_steps, groups, policy);
pi = calc_pi_onedpl_native4(num_steps, groups, policy);
pi = calc_pi_onedpl_two_steps_lib(num_steps, policy);
pi = calc_pi_onedpl_onestep(num_steps, policy);
dpc_common::TimeInterval T;
pi = calc_pi_cpu_seq(num_steps);
auto stop = T.Elapsed();
std::cout << "Cpu Seq calc: \t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop << " seconds\n";
dpc_common::TimeInterval T2;
pi = calc_pi_cpu_tbb(num_steps);
auto stop2 = T2.Elapsed();
std::cout << "Cpu TBB calc: \t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop2 << " seconds\n";
dpc_common::TimeInterval T3;
pi = calc_pi_onedpl_native(num_steps, policy);
auto stop3 = T3.Elapsed();
std::cout << "oneDPL native:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop3 << " seconds\n";
dpc_common::TimeInterval T3a;
pi = calc_pi_onedpl_native2(num_steps, policy, groups);
auto stop3a = T3a.Elapsed();
std::cout << "oneDPL native2:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop3a << " seconds\n";
dpc_common::TimeInterval T3b;
pi = calc_pi_onedpl_native3(num_steps, groups, policy);
auto stop3b = T3b.Elapsed();
std::cout << "oneDPL native3:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop3b << " seconds\n";
dpc_common::TimeInterval T3c;
pi = calc_pi_onedpl_native4(num_steps, groups, policy);
auto stop3c = T3c.Elapsed();
std::cout << "oneDPL native4:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop3c << " seconds\n";
dpc_common::TimeInterval T4;
pi = calc_pi_onedpl_two_steps_lib(num_steps, policy);
auto stop4 = T4.Elapsed();
std::cout << "oneDPL two steps:\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop4 << " seconds\n";
dpc_common::TimeInterval T5;
pi = calc_pi_onedpl_onestep(num_steps, policy);
auto stop5 = T5.Elapsed();
std::cout << "oneDPL transform_reduce: ";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop5 << " seconds\n";
}
int num_step_per_rank = num_steps / num_procs;
float* results_per_rank = new float[num_step_per_rank];
// Initialize an array to store a partial result per rank.
for (size_t i = 0; i < num_step_per_rank; i++) results_per_rank[i] = 0.0;
dpc_common::TimeInterval T6;
// Calculate the Pi number partially by multiple MPI ranks.
mpi_native(results_per_rank, id, num_procs, num_steps, myQueue);
float local_sum = 0.0;
// Use the oneAPI DPC++ Library call to reduce the array using plus
buffer<float> calc_values(results_per_rank, num_step_per_rank);
auto calc_begin2 = oneapi::dpl::begin(calc_values);
auto calc_end2 = oneapi::dpl::end(calc_values);
local_sum =
std::reduce(policy, calc_begin2, calc_end2, 0.0f, std::plus<float>());
// Master rank performs a reduce operation to get the sum of all partial Pi.
MPI_Reduce(&local_sum, &pi, 1, MPI_FLOAT, MPI_SUM, master, MPI_COMM_WORLD);
if (id == master) {
auto stop6 = T6.Elapsed();
std::cout << "mpi native:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop6 << " seconds\n";
}
delete[] results_per_rank;
// mpi_onedpl_onestep
dpc_common::TimeInterval T7;
local_sum = mpi_onedpl_onestep(id, num_procs, num_steps, policy);
auto stop7 = T7.Elapsed();
// Master rank performs a reduce operation to get the sum of all partial Pi.
MPI_Reduce(&local_sum, &pi, 1, MPI_FLOAT, MPI_SUM, master, MPI_COMM_WORLD);
if (id == master) {
std::cout << "mpi transform_reduce:\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop7 << " seconds\n";
std::cout << "success\n";
}
MPI_Finalize();
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/ParallelPatterns/histogram/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// oneDPL headers should be included before standard headers
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <sycl/sycl.hpp>
#include <iostream>
#include <random>
#include <cassert>
#include <cstdlib>
#include <ctime>
// The dense algorithm stores all the bins, even if a bin has 0 entries:
// input array [4,4,1,0,1,2]
// output [(0,1) (1,2) (2,1) (3,0) (4,2)]
// On the other hand, the sparse algorithm excludes the zero-count bins;
// i.e., for the sparse algorithm, the same input gives the following output:
// [(0,1) (1,2) (2,1) (4,2)]
void dense_histogram(std::vector<uint64_t> &input) {
const int N = input.size();
sycl::buffer<uint64_t> histogram_buf{input.data(), sycl::range<1>(N)};
// Combine the equal values together
std::sort(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(histogram_buf), oneapi::dpl::end(histogram_buf));
// num_bins is maximum value + 1
int num_bins;
{
sycl::host_accessor histogram(histogram_buf, sycl::read_only);
num_bins = histogram[N - 1] + 1;
}
sycl::buffer<uint64_t> histogram_new_buf{sycl::range<1>(num_bins)};
auto val_begin = oneapi::dpl::counting_iterator<int>{0};
// Determine the end of each bin of value
oneapi::dpl::upper_bound(
oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(histogram_buf),
oneapi::dpl::end(histogram_buf), val_begin, val_begin + num_bins,
oneapi::dpl::begin(histogram_new_buf));
// Compute histogram by calculating differences of cumulative histogram
std::adjacent_difference(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(histogram_new_buf),
oneapi::dpl::end(histogram_new_buf),
oneapi::dpl::begin(histogram_new_buf));
std::cout << "success for Dense Histogram:\n";
{
sycl::host_accessor histogram_new(histogram_new_buf, sycl::read_only);
std::cout << "[";
for (int i = 0; i < num_bins; i++) {
std::cout << "(" << i << ", " << histogram_new[i] << ") ";
}
std::cout << "]\n";
}
}
void sparse_histogram(std::vector<uint64_t> &input) {
const int N = input.size();
sycl::buffer<uint64_t> histogram_buf{input.data(), sycl::range<1>(N)};
// Combine the equal values together
std::sort(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(histogram_buf), oneapi::dpl::end(histogram_buf));
// Create new buffers to store the unique values and their counts;
// oneapi::dpl::reduce_by_segment requires a buffer sized for the worst case.
// TODO: Consider using just 'sort' and 'transform_reduce' to calculate the
// final result; that approach may be more efficient.
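// For example, with sorted input keys [0,0,1,1,1,2,4,4] and an all-ones value
// buffer, reduce_by_segment produces values [0,1,2,4] and counts [2,3,1,2].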
sycl::buffer<uint64_t> histogram_values_buf{sycl::range<1>(N)};
sycl::buffer<uint64_t> histogram_counts_buf{sycl::range<1>(N)};
sycl::buffer<uint64_t> _const_buf{sycl::range<1>(N)};
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(_const_buf), oneapi::dpl::end(_const_buf), 1);
auto histogram_values_buf_begin = oneapi::dpl::begin(histogram_values_buf);
auto histogram_counts_buf_begin = oneapi::dpl::begin(histogram_counts_buf);
// Find the count of each value
const auto result = oneapi::dpl::reduce_by_segment(
oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(histogram_buf),
oneapi::dpl::end(histogram_buf), oneapi::dpl::begin(_const_buf),
histogram_values_buf_begin,
histogram_counts_buf_begin);
const auto num_bins = result.first - histogram_values_buf_begin;
assert(num_bins == result.second - histogram_counts_buf_begin);
std::cout << "success for Sparse Histogram:\n";
sycl::host_accessor histogram_value(histogram_values_buf, sycl::read_only);
sycl::host_accessor histogram_count(histogram_counts_buf, sycl::read_only);
std::cout << "[";
for (int i = 0; i < num_bins; i++) {
std::cout << "(" << histogram_value[i] << ", " << histogram_count[i]
<< ") ";
}
std::cout << "]\n";
}
int main(void) {
const int N = 1000;
std::vector<uint64_t> input;
srand((unsigned)time(0));
// initialize the input array with randomly generated values between 0 and 8
for (int i = 0; i < N; i++) input.push_back(rand() % 9);
// replace all input entries of "4" with a random number between 0 and 2;
// this is to ensure that we have at least one entry with zero-bin size,
// which shows the difference between sparse and dense algorithm output
for (int i = 0; i < N; i++)
if (input[i] == 4) input[i] = rand() % 3;
dense_histogram(input);
sparse_histogram(input);
return 0;
}
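// Build/run sketch (assumption: a oneAPI SYCL compiler such as icpx; the
// exact command and file name are illustrative, not taken from this sample):
//   icpx -fsycl histogram.cpp -o histogram && ./histogram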
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface; this is used if we don't want to use
// the CUT functions, but rather a self-contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember that the frequency was queried
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface handle that receives the new timer, 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface handle of the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the time with name \a name
//! @param name name of the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the time with name \a name. Does not reset.
//! @param name name of the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param name name of the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param name name of the timer to return the time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param name name of the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
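// Usage sketch: every call below is a helper defined in this header; the
// variable names are illustrative.
//   StopWatchInterface *timer = NULL;
//   sdkCreateTimer(&timer);
//   sdkStartTimer(&timer);
//   /* ... work to be timed ... */
//   sdkStopTimer(&timer);
//   printf("elapsed %f ms, average %f ms\n", sdkGetTimerValue(&timer),
//          sdkGetAverageTimerValue(&timer));
//   sdkDeleteTimer(&timer);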
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Template helper: parse a numeric value of type T from a command-line argument
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
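// Usage sketch for the parsers above; the flag names and variables are
// illustrative, e.g. for a command line "./app -n=256 -scale=1.5 -file=in.pgm".
//   bool verbose = checkCmdLineFlag(argc, (const char **)argv, "verbose");
//   int n = getCmdLineArgumentInt(argc, (const char **)argv, "n");
//   float scale = getCmdLineArgumentFloat(argc, (const char **)argv, "scale");
//   char *fname = NULL;
//   getCmdLineArgumentString(argc, (const char **)argv, "file", &fname);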
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up
// 4
// in
// tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3
// in
// tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in
// tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in
// tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in
// tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in
// tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up
// 2
// in
// tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in
// tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4
// in
// tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in
// tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4
// in
// tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in
// tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in
// tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
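// Usage sketch (the file name is illustrative): the returned path is allocated
// with malloc for backwards compatibility, so the caller must free() it.
//   char *path = sdkFindFilePath("ref_data.bin", argv[0]);
//   if (path != NULL) {
//     /* ... open/read the file at 'path' ... */
//     free(path);
//   }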
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a passthrough)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
// the correct size is checked / set in loadPGMc()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
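// Usage sketch ("in.pgm"/"out.pgm" are illustrative): sdkLoadPGM<float>
// normalizes 8-bit pixels to [0,1] floats, sdkSavePGM<float> scales them back.
//   float *img = NULL;
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM<float>("in.pgm", &img, &w, &h)) {
//     sdkSavePGM<float>("out.pgm", img, w, h);
//     free(img);
//   }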
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \a filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
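// Usage sketch ("signal.dat" is illustrative). Note that the fscanf above
// always uses the "%f" conversion, so this reader effectively assumes T is
// float.
//   float *sig = NULL;
//   unsigned int len = 0;
//   if (sdkReadFile<float>("signal.dat", &sig, &len, false)) {
//     /* ... use sig[0 .. len-1] ... */
//     free(sig);
//   }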
//////////////////////////////////////////////////////////////////////////////
//! Read file \a filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
if (fh == NULL && verbose) {
std::cerr << "sdkReadFile() : Opening file failed." << std::endl;
return false;
}
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \a filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold % of (# of bytes) allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
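// Usage sketch (ref/out/n are illustrative): with threshold 0.0f any single
// mismatch fails; otherwise the comparison still passes as long as fewer than
// threshold * len elements differ by more than epsilon.
//   bool ok = compareDataAsFloatThreshold<float, float>(ref, out, n,
//                                                       0.001f, 0.01f);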
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
fsize = fread(src_buffer, nelements, sizeof(unsigned int), src_fp);
fsize = fread(ref_buffer, nelements, sizeof(unsigned int), ref_fp);
printf(
"> compareBin2Bin <unsigned int> nelements=%d,"
" epsilon=%4.2f, threshold=%4.2f\n",
nelements, epsilon, threshold);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize));
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
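// Example usage (illustrative sketch; load_table and "data.tbl" are
// hypothetical names, not part of this header). The macros capture
// __FILE__/__LINE__ automatically, and the thrown type still derives from
// the corresponding std exception, so it can be caught by base reference:
//
//   void load_table(const char *path) {
//     std::ifstream f(path);
//     if (!f.is_open()) RUNTIME_EXCEPTION("failed to open table file");
//   }
//
//   try { load_table("data.tbl"); }
//   catch (const std::runtime_error &ex) { handleException(ex); }  // prints, exits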
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
// Quite heavyweight, but exceptions are not for
// performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <helper_string.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dpct/dpct.hpp>
#include <sycl/sycl.hpp>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header
// files; please refer to the CUDA examples for the needed CUDA
// headers, which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:4: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CUFFT_SUCCESS";
case 1:
return "CUFFT_INVALID_PLAN";
case 2:
return "CUFFT_ALLOC_FAILED";
case 3:
return "CUFFT_INVALID_TYPE";
case 4:
return "CUFFT_INVALID_VALUE";
case 5:
return "CUFFT_INTERNAL_ERROR";
case 6:
return "CUFFT_EXEC_FAILED";
case 7:
return "CUFFT_SETUP_FAILED";
case 8:
return "CUFFT_INVALID_SIZE";
case 9:
return "CUFFT_UNALIGNED_DATA";
case 10:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case 11:
return "CUFFT_INVALID_DEVICE";
case 12:
return "CUFFT_PARSE_ERROR";
case 13:
return "CUFFT_NO_WORKSPACE";
case 14:
return "CUFFT_NOT_IMPLEMENTED";
case 15:
return "CUFFT_LICENSE_ERROR";
case 16:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {}
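// After DPCT migration this check is intentionally a no-op: SYCL reports
// failures via exceptions rather than CUDA-style error codes (see the
// DPCT1009/DPCT1010 notes in this file), so the checkCudaErrors() call sites
// below compile unchanged but perform no checking.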
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:5: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:7: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
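// e.g. ftoi(2.5f) == 3, ftoi(-2.5f) == -3, ftoi(2.4f) == 2
// (halfway cases round away from zero).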
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM)
typedef struct dpct_type_554348 {
int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128},
{0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128},
{0x70, 64}, {0x72, 64}, {0x75, 64}, {0x80, 64}, {0x86, 128},
{0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default to using the previous one
// to run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
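// Example: an SM 8.6 (Ampere) device encodes as (8 << 4) + 6 == 0x86, which
// the table above maps to 128 cores per SM.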
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_876740 {
int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"}, {0x32, "Kepler"}, {0x35, "Kepler"},
{0x37, "Kepler"}, {0x50, "Maxwell"}, {0x52, "Maxwell"},
{0x53, "Maxwell"}, {0x60, "Pascal"}, {0x61, "Pascal"},
{0x62, "Pascal"}, {0x70, "Volta"}, {0x72, "Xavier"},
{0x75, "Turing"}, {0x80, "Ampere"}, {0x86, "Ampere"},
{0x87, "Ampere"}, {0x89, "Ada"}, {0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
// If we don't find the values, we default to using the previous one
// to run properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:11: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID,
_ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:12: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf =
(uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
} catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:14: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
_ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
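// Illustrative call (sketch): a sample's main() typically runs
//   int devID = findCudaDevice(argc, (const char **)argv);
// and a user can pass "-device=N" on the command line to override the
// default max-GFLOPS device selection.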
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:15: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then CUDA can map to the GLES resource
/*
DPCT1035:16: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:17: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major,
minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version && minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Samples/4_CUDA_Libraries/oceanFFT/oceanFFT.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
FFT-based Ocean simulation
based on original code by Yury Uralsky and Calvin Lin
This sample demonstrates how to use CUFFT to synthesize and
render an ocean surface in real-time.
See Jerry Tessendorf's Siggraph course notes for more details:
http://tessendorf.org/reports.html
It also serves as an example of how to generate multiple vertex
buffer streams from CUDA and render them using GLSL shaders.
*/
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#define WINDOWS_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#endif
// includes
#include <helper_cuda.h>
#include <helper_functions.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dpct/dpct.hpp>
#include <dpct/fft_utils.hpp>
#include <sycl/sycl.hpp>
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
const char *sSDKsample = "CUDA FFT Ocean Simulation";
#define SYCLRT_SQRT_HALF_F 0.707106781f
#define MAX_EPSILON 0.10f
#define THRESHOLD 0.15f
#define REFRESH_DELAY 10 // ms
////////////////////////////////////////////////////////////////////////////////
// constants
unsigned int windowW = 512, windowH = 512;
const unsigned int meshSize = 256;
const unsigned int spectrumW = meshSize + 4;
const unsigned int spectrumH = meshSize + 1;
bool animate = true;
// FFT data
dpct::fft::fft_engine_ptr fftPlan;
sycl::float2 *d_h0 = 0; // heightfield at time 0
sycl::float2 *h_h0 = 0;
sycl::float2 *d_ht = 0; // heightfield at time t
sycl::float2 *d_slope = 0;
// pointers to device object
float *g_hptr = NULL;
sycl::float2 *g_sptr = NULL;
// simulation parameters
const float g = 9.81f; // gravitational constant
const float A = 1e-7f; // wave scale factor
const float patchSize = 100; // patch size
float windSpeed = 100.0f;
float windDir = 3.141592654F / 3.0f;
float dirDepend = 0.07f;
StopWatchInterface *timer = NULL;
float animTime = 0.0f;
float prevTime = 0.0f;
float animationRate = -0.001f;
// Auto-Verification Code
const int frameCheckNumber = 4;
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
////////////////////////////////////////////////////////////////////////////////
// kernels
extern "C" void cudaGenerateSpectrumKernel(sycl::float2 *d_h0,
sycl::float2 *d_ht,
unsigned int in_width,
unsigned int out_width,
unsigned int out_height,
float animTime, float patchSize);
extern "C" void cudaUpdateHeightmapKernel(float *d_heightMap,
sycl::float2 *d_ht,
unsigned int width,
unsigned int height, bool autoTest);
extern "C" void cudaCalculateSlopeKernel(float *h, sycl::float2 *slopeOut,
unsigned int width,
unsigned int height);
////////////////////////////////////////////////////////////////////////////////
// forward declarations
void runAutoTest(int argc, char **argv);
// rendering callbacks
void timerEvent(int value);
// Cuda functionality
void runCudaTest(char *exec_path);
void generate_h0(sycl::float2 *h0);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
printf(
"NOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n\n");
// check for command line arguments
if (checkCmdLineFlag(argc, (const char **)argv, "qatest")) {
animate = false;
fpsLimit = frameCheckNumber;
runAutoTest(argc, argv);
} /* else {
printf(
"[%s]\n\n"
"Left mouse button - rotate\n"
"Middle mouse button - pan\n"
"Right mouse button - zoom\n"
"'w' key - toggle wireframe\n",
sSDKsample);
runGraphicsTest(argc, argv);
}*/
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
void runAutoTest(int argc, char **argv) {
printf("%s Starting...\n\n", argv[0]);
// Cuda init
// int dev = findCudaDevice(argc, (const char **)argv);
int dev = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
dpct::device_info deviceProp;
checkCudaErrors(
(dpct::dev_mgr::instance().get_device(dev).get_device_info(deviceProp),
0));
// checkCudaErrors(DPCT_CHECK_ERROR(
// dpct::dev_mgr::instance().get_device(dev).get_device_info(deviceProp)));
printf("Compute capability %d.%d\n", deviceProp.get_major_version(),
deviceProp.get_minor_version());
// create FFT plan
checkCudaErrors(DPCT_CHECK_ERROR(
fftPlan = dpct::fft::fft_engine::create(
&dpct::get_default_queue(), meshSize, meshSize,
dpct::fft::fft_type::complex_float_to_complex_float)));
// allocate memory
int spectrumSize = spectrumW * spectrumH * sizeof(sycl::float2);
checkCudaErrors(
DPCT_CHECK_ERROR(d_h0 = (sycl::float2 *)sycl::malloc_device(
spectrumSize, dpct::get_default_queue())));
h_h0 = (sycl::float2 *)malloc(spectrumSize);
generate_h0(h_h0);
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memcpy(d_h0, h_h0, spectrumSize).wait()));
int outputSize = meshSize * meshSize * sizeof(sycl::float2);
checkCudaErrors(DPCT_CHECK_ERROR(d_ht = (sycl::float2 *)sycl::malloc_device(
outputSize, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(d_slope = (sycl::float2 *)sycl::malloc_device(
outputSize, dpct::get_default_queue())));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
prevTime = sdkGetTimerValue(&timer);
runCudaTest(argv[0]);
printf("Processing time : %f (ms)\n", sdkGetTimerValue(&timer));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_ht, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_slope, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_h0, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(dpct::fft::fft_engine::destroy(fftPlan)));
free(h_h0);
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
float urand() { return rand() / (float)RAND_MAX; }
// Generates a Gaussian random number with mean 0 and standard deviation 1.
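// (Box-Muller transform; u1 is clamped away from zero so logf() stays finite.)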
float gauss() {
float u1 = urand();
float u2 = urand();
if (u1 < 1e-6f) {
u1 = 1e-6f;
}
return sqrtf(-2 * logf(u1)) * cosf(2 * 3.141592654F * u2);
}
// Phillips spectrum
// (Kx, Ky) - normalized wave vector
// Vdir - wind angle in radians
// V - wind speed
// A - constant
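// For reference, the function below evaluates Tessendorf's Phillips spectrum
//   P(k) = A * exp(-1 / (|k| L)^2) / |k|^4 * (k_hat . w_hat)^2,  L = V^2 / g,
// with the extra dir_depend factor damping waves moving against the wind.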
float phillips(float Kx, float Ky, float Vdir, float V, float A,
float dir_depend) {
float k_squared = Kx * Kx + Ky * Ky;
if (k_squared == 0.0f) {
return 0.0f;
}
// largest possible wave from constant wind of velocity v
float L = V * V / g;
float k_x = Kx / sqrtf(k_squared);
float k_y = Ky / sqrtf(k_squared);
float w_dot_k = k_x * cosf(Vdir) + k_y * sinf(Vdir);
float phillips = A * expf(-1.0f / (k_squared * L * L)) /
(k_squared * k_squared) * w_dot_k * w_dot_k;
// filter out waves moving opposite to wind
if (w_dot_k < 0.0f) {
phillips *= dir_depend;
}
// damp out waves with very small length w << l
// float w = L / 10000;
// phillips *= expf(-k_squared * w * w);
return phillips;
}
// Generate base heightfield in frequency space
void generate_h0(sycl::float2 *h0) {
for (unsigned int y = 0; y <= meshSize; y++) {
for (unsigned int x = 0; x <= meshSize; x++) {
float kx =
(-(int)meshSize / 2.0f + x) * (2.0f * 3.141592654F / patchSize);
float ky =
(-(int)meshSize / 2.0f + y) * (2.0f * 3.141592654F / patchSize);
float P = sqrtf(phillips(kx, ky, windDir, windSpeed, A, dirDepend));
if (kx == 0.0f && ky == 0.0f) {
P = 0.0f;
}
// float Er = urand()*2.0f-1.0f;
// float Ei = urand()*2.0f-1.0f;
float Er = gauss();
float Ei = gauss();
float h0_re = Er * P * SYCLRT_SQRT_HALF_F;
float h0_im = Ei * P * SYCLRT_SQRT_HALF_F;
int i = y * spectrumW + x;
h0[i].x() = h0_re;
h0[i].y() = h0_im;
}
}
}
void runCudaTest(char *exec_path) {
checkCudaErrors(
DPCT_CHECK_ERROR(g_hptr = sycl::malloc_device<float>(
meshSize * meshSize, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(g_sptr = sycl::malloc_device<sycl::float2>(
meshSize * meshSize, dpct::get_default_queue())));
// generate wave spectrum in frequency domain
cudaGenerateSpectrumKernel(d_h0, d_ht, spectrumW, meshSize, meshSize,
animTime, patchSize);
// execute inverse FFT to convert to spatial domain
checkCudaErrors(
DPCT_CHECK_ERROR((fftPlan->compute<sycl::float2, sycl::float2>(
d_ht, d_ht, dpct::fft::fft_direction::backward))));
// update heightmap values
cudaUpdateHeightmapKernel(g_hptr, d_ht, meshSize, meshSize, true);
{
float *hptr = (float *)malloc(meshSize * meshSize * sizeof(float));
dpct::get_default_queue()
.memcpy((void *)hptr, (void *)g_hptr,
meshSize * meshSize * sizeof(float))
.wait();
sdkDumpBin((void *)hptr, meshSize * meshSize * sizeof(float),
"spatialDomain.bin");
if (!sdkCompareBin2BinFloat("spatialDomain.bin", "ref_spatialDomain.bin",
meshSize * meshSize, MAX_EPSILON, THRESHOLD,
exec_path)) {
g_TotalErrors++;
}
free(hptr);
}
// calculate slope for shading
cudaCalculateSlopeKernel(g_hptr, g_sptr, meshSize, meshSize);
{
sycl::float2 *sptr =
(sycl::float2 *)malloc(meshSize * meshSize * sizeof(sycl::float2));
dpct::get_default_queue()
.memcpy((void *)sptr, (void *)g_sptr,
meshSize * meshSize * sizeof(sycl::float2))
.wait();
sdkDumpBin(sptr, meshSize * meshSize * sizeof(sycl::float2),
"slopeShading.bin");
if (!sdkCompareBin2BinFloat("slopeShading.bin", "ref_slopeShading.bin",
meshSize * meshSize * 2, MAX_EPSILON, THRESHOLD,
exec_path)) {
g_TotalErrors++;
}
free(sptr);
}
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(g_hptr, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(g_sptr, dpct::get_default_queue())));
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/Samples/4_CUDA_Libraries/oceanFFT/oceanFFT_kernel.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
#include <dpct/dpct.hpp>
#include <dpct/fft_utils.hpp>
#include <sycl/sycl.hpp>
// Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b) { return (a + (b - 1)) / b; }
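// e.g. cuda_iDivUp(256, 8) == 32 and cuda_iDivUp(100, 8) == 13.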
// complex math functions
sycl::float2 conjugate(sycl::float2 arg) {
return sycl::float2(arg.x(), -arg.y());
}
sycl::float2 complex_exp(float arg) {
return sycl::float2(sycl::cos(arg), sycl::sin(arg));
}
sycl::float2 complex_add(sycl::float2 a, sycl::float2 b) {
return sycl::float2(a.x() + b.x(), a.y() + b.y());
}
sycl::float2 complex_mult(sycl::float2 ab, sycl::float2 cd) {
return sycl::float2(ab.x() * cd.x() - ab.y() * cd.y(),
ab.x() * cd.y() + ab.y() * cd.x());
}
// generate wave heightfield at time t based on initial heightfield and
// dispersion relationship
void generateSpectrumKernel(sycl::float2 *h0, sycl::float2 *ht,
unsigned int in_width, unsigned int out_width,
unsigned int out_height, float t, float patchSize,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int in_index = y * in_width + x;
unsigned int in_mindex =
(out_height - y) * in_width + (out_width - x); // mirrored
unsigned int out_index = y * out_width + x;
// calculate wave vector
sycl::float2 k;
k.x() = (-(int)out_width / 2.0f + x) * (2.0f * 3.141592654F / patchSize);
k.y() = (-(int)out_width / 2.0f + y) * (2.0f * 3.141592654F / patchSize);
// calculate dispersion w(k)
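// deep-water dispersion relation: w(k) = sqrt(g * |k|), with g = 9.81 m/s^2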
float k_len = sycl::sqrt(k.x() * k.x() + k.y() * k.y());
float w = sycl::sqrt(9.81f * k_len);
if ((x < out_width) && (y < out_height)) {
sycl::float2 h0_k = h0[in_index];
sycl::float2 h0_mk = h0[in_mindex];
// output frequency-space complex values
ht[out_index] =
complex_add(complex_mult(h0_k, complex_exp(w * t)),
complex_mult(conjugate(h0_mk), complex_exp(-w * t)));
// ht[out_index] = h0_k;
}
}
// update height map values based on output of FFT
void updateHeightmapKernel(float *heightMap, sycl::float2 *ht,
unsigned int width,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
// cos(pi * (m1 + m2))
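// Multiplying by (-1)^(x+y) undoes the frequency-domain centering shift, so
// the inverse-FFT output lands in the expected spatial orientation.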
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].x() * sign_correction;
}
// update height map values based on output of FFT
void updateHeightmapKernel_y(float *heightMap, sycl::float2 *ht,
unsigned int width,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
// cos(pi * (m1 + m2))
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].y() * sign_correction;
}
// generate slope by partial differences in spatial domain
void calculateSlopeKernel(float *h, sycl::float2 *slopeOut, unsigned int width,
unsigned int height,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
sycl::float2 slope = sycl::float2(0.0f, 0.0f);
if ((x > 0) && (y > 0) && (x < width - 1) && (y < height - 1)) {
slope.x() = h[i + 1] - h[i - 1];
slope.y() = h[i + width] - h[i - width];
}
slopeOut[i] = slope;
}
// wrapper functions
extern "C" void cudaGenerateSpectrumKernel(sycl::float2 *d_h0,
sycl::float2 *d_ht,
unsigned int in_width,
unsigned int out_width,
unsigned int out_height,
float animTime, float patchSize) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid(1, cuda_iDivUp(out_height, block[1]),
cuda_iDivUp(out_width, block[2]));
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
generateSpectrumKernel(d_h0, d_ht, in_width, out_width, out_height,
animTime, patchSize, item_ct1);
});
}
extern "C" void cudaUpdateHeightmapKernel(float *d_heightMap,
sycl::float2 *d_ht,
unsigned int width,
unsigned int height, bool autoTest) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid(1, cuda_iDivUp(height, block[1]),
cuda_iDivUp(width, block[2]));
if (autoTest) {
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
updateHeightmapKernel_y(d_heightMap, d_ht, width, item_ct1);
});
} else {
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
updateHeightmapKernel(d_heightMap, d_ht, width, item_ct1);
});
}
}
extern "C" void cudaCalculateSlopeKernel(float *hptr, sycl::float2 *slopeOut,
unsigned int width,
unsigned int height) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid2(1, cuda_iDivUp(height, block[1]),
cuda_iDivUp(width, block[2]));
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid2 * block, block), [=](sycl::nd_item<3> item_ct1) {
calculateSlopeKernel(hptr, slopeOut, width, height, item_ct1);
});
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <memory>
#include <oneapi/ccl.hpp>
#include <sycl/sycl.hpp>
#include <unordered_map>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &get_kvs(
const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(
std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Helper class to init ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get stored kvs with \p addr if it exists. Otherwise, create kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs> create_kvs(
const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr) ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
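/// Typical rendezvous (illustrative sketch, assuming an out-of-band channel
/// such as MPI): rank 0 calls create_kvs_address() and broadcasts the
/// returned address; every rank then passes that address to create_kvs() /
/// communicator_wrapper so all ranks join the same key-value store.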
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() { delete _ccl_stream_ptr; };
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const { return _comm.rank(); }
/// Retrieves the number of ranks in oneapi::ccl::communicator
/// \returns The number of ranks
int size() const { return _comm.size(); }
/// Return underlying native device, which was used in
/// oneapi::ccl::communicator
sycl::device get_device() const { return _comm.get_device().get_native(); }
/// \brief allreduce is a collective communication operation that performs
/// the global reduction operation on values from all ranks of communicator
/// and distributes the result back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the
/// same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and
/// @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the
/// communicator and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and
/// @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts
/// data
/// from one rank of communicator (denoted as root) to all other ranks.
/// Only support in-place operation
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result
/// \param count the number of elements of type @c dtype in @c buf
/// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that
/// performs the global reduction operation
/// on values from all ranks of the communicator and scatters the
/// result in blocks back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the
/// same dimension as @c send_buf
/// \param recv_count the number of elements of type @c dtype in receive block
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if (!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr =
new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr), _imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh) {
cgh.host_task([=] {
_imp->_ccl_event_impl.wait();
delete _imp;
});
});
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <cassert>
#include <complex>
#include <cstdint>
#include <sycl/sycl.hpp>
#include <type_traits>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T,
unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T>
class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints>
struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T>
struct DataType {
using T2 = T;
};
template <typename T>
struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size,
direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size,
direction)
.wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer to the destination location.
/// \param [in] from_ptr A pointer to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
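// Example usage (illustrative sketch, not part of the original header;
// copies a 4x3 column-major float matrix between two host buffers whose
// leading dimensions differ):
//   float src[5 * 3], dst[4 * 3];
//   dpct::matrix_mem_copy(dst, src, /*to_ld=*/4, /*from_ld=*/5,
//                         /*rows=*/4, /*cols=*/3);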
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32) return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
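// Example usage (illustrative sketch, not part of the original header;
// round-trips a double through its two 32-bit halves):
//   double d = 3.14;
//   int hi = dpct::cast_double_to_int(d);          // high 32 bits
//   int lo = dpct::cast_double_to_int(d, false);   // low 32 bits
//   double d2 = dpct::cast_ints_to_double(hi, lo); // d2 == d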
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T>
inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a) return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
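// Example usage (illustrative sketch, not part of the original header):
//   std::uint32_t r = dpct::reverse_bits(0x00000001u); // r == 0x80000000u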
/// \param [in] a The first value, containing 4 bytes
/// \param [in] b The second value, containing 4 bytes
/// \param [in] s The selector value; only the lower 16 bits are used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
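// Example usage (illustrative sketch, not part of the original header; each
// of the four selector nibbles picks one byte of the 8-byte value b:a, so
// selector 0x0123 byte-swaps `a`):
//   unsigned int r = dpct::byte_level_permute(0x04030201u, 0u, 0x0123);
//   // r == 0x01020304u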
/// Find the position of the least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T>
inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
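// Example usage (illustrative sketch, not part of the original header):
//   int p0 = dpct::ffs(0); // p0 == 0, no bit is set
//   int p1 = dpct::ffs(8); // p1 == 4, bit 3 is the least significant set bit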
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in a logical sub_group gets its value from another
/// work-item whose id is \p remote_local_id. If \p remote_local_id is outside
/// the logical sub_group id range, \p remote_local_id is taken modulo \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
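// Example usage inside a kernel (illustrative sketch, not part of the
// original header; `item` and `my_val` are assumed to exist in the kernel;
// broadcasts lane 0's value within each 16-wide logical sub-group):
//   auto sg = item.get_sub_group();
//   int v = dpct::select_from_sub_group(sg, my_val, 0, 16);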
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from another work-item
/// whose id is the caller's id plus \p delta. If the calculated id is outside
/// the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from another work-item
/// whose id is the caller's id minus \p delta. If the calculated id is outside
/// the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from another work-item
/// whose id is the bitwise exclusive OR of the caller's id and \p mask. If the
/// calculated id is outside the logical sub_group id range, the work-item
/// keeps its own value. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
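// Example usage inside a kernel (illustrative sketch, not part of the
// original header; a butterfly reduction that sums `val` across a 32-wide
// sub-group `sg`):
//   for (int mask = 16; mask > 0; mask >>= 1)
//     val += dpct::permute_sub_group_by_xor(sg, val, mask);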
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask, sycl::sub_group g, T x,
int remote_local_id, int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x,
logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group not "
"supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int delta, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result =
__spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_left not "
"supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int delta, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_right not "
"supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked
/// sub-group operation. The parameter member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int mask, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size)
? start_index + target_offset
: id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x,
logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of permute_sub_group_by_xor not "
"supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the calling work-item and its
/// work group.
/// \param [in] counter: An atomic object defined in device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void nd_range_barrier(
const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the calling work-item and its
/// work group.
/// \param [in] counter: An atomic object defined in device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void nd_range_barrier(
const sycl::nd_item<1> &item,
sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
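// Example usage (illustrative sketch, not part of the original header;
// `sync_ptr` is assumed to point to a zero-initialized unsigned int in
// device global memory and `item` is the current sycl::nd_item):
//   sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
//                    sycl::memory_scope::device,
//                    sycl::access::address_space::global_space>
//       counter(*sync_ptr);
//   dpct::experimental::nd_range_barrier(item, counter);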
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
/// Returns the number of logical-groups in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
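// Example usage inside a kernel (illustrative sketch, not part of the
// original header; splits the work-group of `item` into 8-item logical
// groups):
//   dpct::experimental::logical_group lg(item, item.get_group(), 8);
//   uint32_t lane_id = lg.get_local_linear_id();  // id within the group
//   uint32_t group_id = lg.get_group_linear_id(); // which logical group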
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation, it computes the max active
/// work-group number per Xe-Core. Ref to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether barrier is used.
/// \param [in] used_large_grf Whether large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size is
/// used instead of \p wg_size and -1 is returned.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf) num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
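// Example usage (illustrative sketch, not part of the original header;
// queries how many 256-item work-groups using 4 KB of SLM and a barrier
// can be active per Xe-Core):
//   int num_wg = 0;
//   dpct::experimental::calculate_max_active_wg_per_xecore(
//       &num_wg, /*wg_size=*/256, /*slm_size=*/4 * 1024, /*sg_size=*/32,
//       /*used_barrier=*/true);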
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ? &get_default_queue() : reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2*,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params, typename R,
typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type =
std::tuple_element_t<account_for_default_params<i>(), std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i - 1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i - 1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra) return nullptr;
for (; (std::size_t)*extra != 0; ++extra) {
if ((std::size_t)*extra == 1) {
return static_cast<char *>(*(extra + 1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments,
/// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params), args_buffer(get_args_buffer(extra)) {}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i> *>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i> *>(args_buffer + get_offset<i>());
}
}
};
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8-bit/16-bit
/// channel widths will be 32 bits; sycl::half is an exception.
template <class T>
struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t> : public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>> : public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T>
struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T>
struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>> : public fetch_data<sycl::vec<T, 4>> {};
template <class T>
struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create image according to the given type \p T and \p dims.
template <class T>
static image_wrapper_base *create_image_wrapper(int dims);
/// Create image with given data type \p T, channel order and dims
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel,
int dims);
} // namespace detail
/// Image channel info, including channel number, order, data width and type
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T>
static image_channel create() {
image_channel channel;
channel.set_channel_size(
detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) * 8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
/// \param g Channel g width in bits. Should be same with \p r, or zero.
/// \param b Channel b width in bits. Should be same with \p g, or zero.
/// \param a Channel a width in bits. Should be same with \p b, or zero.
/// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Channels number to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num, unsigned channel_size) {
if (in_channel_num < _channel_num) return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions>
void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i) _range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data) std::free(_host_data);
_host_data = nullptr;
}
};
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) { return _channel.set_channel_num(num); }
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
size_t _x, _y, _pitch;
image_channel _channel;
};
/// Image sampling info, including addressing mode, filtering mode and
/// normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode = sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) {
_addressing_mode = addressing_mode;
}
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) {
_filtering_mode = filtering_mode;
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) { _sampling_info = info; }
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) { return _data.set_channel_num(num); }
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray>
class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false>
class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) ==
detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T,
IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh,
sycl::queue &q = get_default_queue()) {
if (!_image) create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image) delete _image;
_image = nullptr;
}
};
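// Example usage (illustrative sketch, not part of the original header;
// `dev_ptr`, `width`, `height`, `pitch_in_bytes`, and queue `q` are assumed;
// wraps a pitched 2-D float buffer and reads it through a sampled accessor):
//   dpct::image_wrapper<float, 2> tex;
//   tex.attach(dev_ptr, width, height, pitch_in_bytes);
//   q.submit([&](sycl::handler &cgh) {
//     dpct::image_accessor_ext<float, 2> acc(tex.get_sampler(),
//                                            tex.get_access(cgh, q));
//     cgh.parallel_for(sycl::range<2>(height, width), [=](sycl::id<2> id) {
//       float v = acc.read((int)id[1], (int)id[0]);
//       (void)v;
//     });
//   });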
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value &&
std::is_integral<Coord1>::value &&
std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value &&
std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions>
class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x,
int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(_img_acc[index].read(x, _sampler));
}
};
/// Create image wrapper according to image data and sampling info.
/// \param data Image data used to create image wrapper.
/// \param info Image sampling info used to create image wrapper.
/// \returns Pointer to the base class of the created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
namespace detail {
/// Create image wrapper with the given type \p T and \p dims.
template <class T>
static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create image wrapper with given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num,
int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create image wrapper with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel,
int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(),
dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <windows.h>
#include <unordered_map>
#include <unordered_set>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <fstream>
#include <sstream>
#include <image.hpp>
#include <random>
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info get_kernel_function_info(
const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to a temporary file and return the absolute path to it.
/// The temporary file is created inside a temporary directory; both have
/// random names, and only the user has access permissions. Only one
/// temporary file will be created in the temporary directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec) throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts) throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec) throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec) throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good()) throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec) throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof()) break;
if (c != data[cnt++]) mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
  // Analyze PE-header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr) throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
  // Unlike a Windows DLL, which cannot be deleted while in use, the file
  // can be removed immediately here on non-Windows platforms.
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load a kernel library whose image is already in memory and return a
/// handle to use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function get_kernel_function(
kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr) throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
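// Example (sketch, assuming a hypothetical kernel binary "program.bin" that
// exports a "vector_add_wrapper" symbol): load the library, look up the
// kernel by name, launch it, then unload.
//
//   sycl::queue q;
//   dpct::kernel_library lib = dpct::load_kernel_library("program.bin");
//   dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");
//   float *data = sycl::malloc_device<float>(1024, q);
//   void *args[] = {&data};
//   dpct::invoke_kernel_function(fn, q, sycl::range<3>(1, 1, 8),
//                                sycl::range<3>(1, 1, 128),
//                                /*localMemSize=*/0, args, nullptr);
//   dpct::unload_kernel_library(lib);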
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p get_image_wrapper(
dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr) throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <limits.h>
#include <math.h>
#include <iostream>
#include <sycl/sycl.hpp>
template <class... Args>
class dpct_kernel_name;
template <int Arg>
class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct {
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
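// Example (sketch): DPCT_CHECK_ERROR wraps an expression in an
// immediately-invoked lambda so exception-based SYCL calls can be consumed
// in error-code style by migrated code:
//
//   sycl::queue q;
//   float *p = nullptr;
//   dpct::error_code st =
//       DPCT_CHECK_ERROR(p = sycl::malloc_device<float>(1024, q));
//   if (st != dpct::success) { /* handle the failure */ }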
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <algorithm>
#include <list>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <unordered_map>
#include "device.hpp"
#include "lib_common_utils.hpp"
#include "memory.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t to_dpct_library_data_t(
::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
  /// \param [in] t Sequence length.
  /// \param [in] n Batch size.
  /// \param [in] c Input channel size.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n,
int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
  /// \param [in] dims Array of dimension ndims that contains the size of each
  /// memory dimension.
  /// \param [in] strides Array of dimension ndims that contains the stride of
  /// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
  /// \param [out] t Sequence length.
  /// \param [out] n Batch size.
  /// \param [out] c Input channel size.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from a ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from a ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const { return _desc.get_strides(); }
/// Getting element num from a ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const { return bool(_desc); }
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
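// Example (sketch): describing a 4D NCHW float tensor with memory_desc_ext.
// The shape values are illustrative, and library_data_t::real_float is
// assumed to be the dpct enumerator for 32-bit float.
//
//   dpct::dnnl::memory_desc_ext desc;
//   desc.set(dpct::dnnl::memory_format_tag::nchw,
//            dpct::library_data_t::real_float, 1, 3, 224, 224);
//   size_t bytes = desc.get_size();         // 1*3*224*224 * sizeof(float)
//   size_t elems = desc.get_element_num();  // 150528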
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter.
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if (alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
  /// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if (_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \param [out] alg Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
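// Example (sketch): note how set() interprets its single alpha argument --
// for eltwise_clip it is stored as the upper clip bound (beta) with a lower
// bound of 0, which get() maps back to alpha for the caller.
//
//   dpct::dnnl::activation_desc act;
//   act.set(::dnnl::algorithm::eltwise_relu, 0.f);  // plain ReLU
//   act.set(::dnnl::algorithm::eltwise_clip, 6.f);  // clips to [0, 6]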
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
  /// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \param [out] local_size Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \param [out] k Value of k parameter.
float get_k() const { return _k; }
};
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting a ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
  /// \param [in] padding Array of dimension ndims containing the padding size
  /// of each dimension.
  /// \param [in] stride Array of dimension ndims containing the stride size of
  /// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from a ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \param [out] alg Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
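// Example (sketch): each spatial output dimension follows
// out = 1 + (in + 2*padding - kernel) / stride. For a 2x2 max-pool, stride
// 2, no padding, on an assumed 1x16x32x32 src_desc:
//
//   dpct::dnnl::pooling_desc pool;
//   pool.set(::dnnl::algorithm::pooling_max, /*kernel*/ 2, 2,
//            /*padding*/ 0, 0, /*stride*/ 2, 2);
//   int n, c, h, w;
//   pool.get_forward_output_dim(src_desc, &n, &c, &h, &w);
//   // n == 1, c == 16, h == 1 + (32 + 0 - 2) / 2 == 16, w == 16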
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
  /// Getting floating point math mode specified in the given convolution
  /// descriptor.
  /// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate.
/// \param [in] dilate_w Value of width of dilate.
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting a ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
  /// \param [in] paddings Array of dimension ndims containing the padding size
  /// of each dimension.
  /// \param [in] strides Array of dimension ndims containing the stride size of
  /// each dimension.
  /// \param [in] dilates Array of dimension ndims containing the dilate size of
  /// each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from a ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
  /// \param [out] strides Array of dimension ndims containing the stride size
  /// of each dimension.
  /// \param [out] dilates Array of dimension ndims containing the dilate size
  /// of each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
  /// Getting the dilate parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the dilate size of each
/// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
    // _dilates stores the oneDNN-style dilation (0 == none), so the
    // effective kernel extent is (k - 1) * (_dilates + 1) + 1.
    *out_h = 1 + (dims[2] + 2 * _paddings[0] -
                  (1 + ((_dilates[0] + 1) * (weight_dims[2] - 1)))) /
                     _strides[0];
    *out_w = 1 + (dims[3] + 2 * _paddings[1] -
                  (1 + ((_dilates[1] + 1) * (weight_dims[3] - 1)))) /
                     _strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
    // Output channels come from the weight's output-channel dimension
    // (index 0), consistent with the 2D overload above.
    out_dims[1] = weight_dims[0];
for (int i = 2; i < ndims; i++) {
      out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
                         (1 + ((_dilates[i - 2] + 1) * (weight_dims[i] - 1)))) /
                            _strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0 && _dilates.size() == 0 &&
_paddings.size() == 0);
}
};
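// Example (sketch): set() takes the conventional dilation (1 == none) and
// stores dilate - 1 in oneDNN style, so the effective kernel extent is
// (k - 1) * (stored_dilate + 1) + 1. For a 3x3 kernel, padding 1, stride 1,
// dilation 2 on a 32x32 input: effective extent 5,
// out = 1 + (32 + 2 - 5) / 1 == 30.
//
//   dpct::dnnl::convolution_desc conv;
//   conv.set(/*padding*/ 1, 1, /*stride*/ 1, 1, /*dilate*/ 2, 2);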
/// An enum class representing rnn mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing rnn bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing rnn direction.
enum class rnn_direction { unidirectional, bidirectional };
/// A class holding description for a RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
/// A class holding description for a Dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const { return bool(_imp); }
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init() { _imp = std::make_shared<dropout_desc_imp>(); }
/// Setting a dropout descriptor with given parameters.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
  /// \param [in] state Memory that stores random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
/// Getting parameters from a dropout descriptor.
  /// \param [out] p Probability of value set to zero.
  /// \param [out] states Memory that stores random generator state.
  /// \param [out] seed Seed used to initialize conditions of the generator
  /// state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
/// Getting the probability of value set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
  /// Restoring a dropout descriptor from stored state.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
  /// \param [in] state Memory that stores random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
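// Example (sketch): typical dropout descriptor lifetime; "engine" is an
// initialized engine_ext and "state"/"state_size" are assumed to be a
// device buffer sized for the generator state.
//
//   dpct::dnnl::dropout_desc drop;
//   drop.init();
//   drop.set(engine, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
//   float p; void *st; unsigned long long seed;
//   drop.get(&p, &st, &seed);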
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses LRU replacement policy, and the default cache
// capacity is 1024.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc);
::dnnl::memory::desc get_bn_scale_bias_mean_var_desc(
const ::dnnl::memory::desc &desc, batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc);
::dnnl::memory::desc bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc transfer_memory_desc_to_format_tag_any(
const ::dnnl::memory::desc &desc) {
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc create_primitive_desc(
args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool scale_parameter_preprocess(
const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T>
struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(
int group_count, const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const { return bool(_eng) && bool(_s) && bool(_q); }
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
/// Creating oneDNN engine.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
  /// Setting the user's SYCL queue for a oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
throw std::runtime_error(
"set_queue: queue is mismatch with current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
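// A minimal setup sketch (variable names are illustrative). A replacement
// queue must live in the same context the engine was created with, or
// set_queue() throws:
//
//   engine_ext eng;
//   eng.create_engine(); // binds the current dpct device's default queue
//   sycl::queue q(dpct::get_current_device().get_context(),
//                 dpct::get_current_device());
//   eng.set_queue(&q);   // later primitives now execute on q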
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
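// Sketch of the alpha/beta convention shared by sum() and the other scaled
// primitives here, as described in the comments above:
// dst = alpha * op(src) + beta * dst. For example, accumulating src into dst
// (both descriptors assumed to describe the same shape):
//
//   eng.sum(1.0f, src_desc, src, 1.0f, dst_desc, dst);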
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
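// Illustrative call (assumes `adesc` was configured elsewhere, e.g. with a
// ReLU algorithm): dst = 1.0f * act(src) + 0.0f * dst, i.e. a plain forward
// activation that ignores the prior contents of dst:
//
//   eng.activation_forward(adesc, 1.0f, src_desc, src,
//                          0.0f, dst_desc, dst);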
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc,
void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Perform specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Perform specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
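// Illustrative sketch (assumes the enum exposes a `sum` reduction and that
// dst_desc describes the reduced shape, e.g. 1x1x1x1 for a full reduction):
//
//   eng.async_reduction(reduction_op::sum, 1.0f, src_desc, src,
//                       0.0f, dst_desc, dst)
//       .wait();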
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode);
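// Sketch: deriving the parameter descriptor for spatial batch normalization
// from the activation's source descriptor (assumes a `spatial` enumerator in
// batch_normalization_mode):
//
//   memory_desc_ext scale_bias_mean_var_desc;
//   engine_ext::derive_batch_normalization_memory_desc(
//       scale_bias_mean_var_desc, src_desc,
//       batch_normalization_mode::spatial);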
/// Get the size of the workspace needed by batch normalization. The data
/// stored in the workspace must be preserved between forward and backward.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
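// Illustrative inference call (hypothetical pointers; `sbmv_desc` would be
// derived as sketched above, and mean/var hold the running statistics
// collected during training):
//
//   eng.async_batch_normalization_forward_inference(
//          batch_normalization_mode::spatial, 1e-5f, 1.0f, src_desc, src,
//          0.0f, dst_desc, dst, sbmv_desc, scale, bias, mean, var)
//       .wait();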
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean,
void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior
/// value in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Value to scaling factors used to scale the
/// computed parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Value to scaling factors used to scale the prior
/// value in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior
/// value in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the
/// computed parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior
/// value in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior
/// value in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the
/// computed parameter value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior
/// value in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] mean_var_desc Differential mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean,
void *saved_var, size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Value to scaling factors used to scale the summand
/// value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias);
/// Getting the required weight space size for specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for specified rnn
/// operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc,
::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size,
size_t *workspace_size);
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc,
void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
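// Typical forward workflow sketch: query the required sizes first, allocate
// buffers of those sizes, then launch (`desc` and the data pointers are
// placeholders):
//
//   size_t weight_size = 0, scratchpad_size = 0, workspace_size = 0;
//   eng.rnn_get_weight_space_size(desc, &weight_size);
//   eng.rnn_get_scratchpad_workspace_size(
//       desc, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad_size, &workspace_size);
//   // ...allocate weight/scratchpad/workspace accordingly, then:
//   eng.async_rnn_forward(desc, ::dnnl::prop_kind::forward_training,
//       src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
//       iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight,
//       scratchpad_size, scratchpad, workspace_size, workspace);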
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden
/// state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden
/// state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell
/// state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell
/// state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for specified dropout operation.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
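// Sketch (hypothetical names): the workspace is sized via
// get_dropout_workspace_size() and must be kept alive until the backward
// pass, which consumes the state recorded here:
//
//   size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
//   void *ws = sycl::malloc_device(ws_size, *eng.get_queue());
//   eng.async_dropout_forward(ddesc, src_desc, src, dst_desc, dst,
//                             ws, ws_size).wait();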
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
};
inline void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
    size_t required_state_size = engine.get_dropout_state_size();
    if (state_size < required_state_size) {
      throw std::runtime_error(
          "restore: state_size is less than the required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size)
.wait();
_imp->_rng_engine = oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
    size_t required_state_size = engine.get_dropout_state_size();
    if (state_size < required_state_size) {
      throw std::runtime_error("set: insufficient memory to save state.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size)
.wait();
}
#endif
}
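// Usage sketch contrasting the two entry points above: set() seeds a fresh
// RNG and serializes its state into the caller-provided buffer, while
// restore() re-loads a state that an earlier set() produced (names are
// placeholders):
//
//   size_t state_size = eng.get_dropout_state_size();
//   void *state = sycl::malloc_device(state_size, *eng.get_queue());
//   ddesc.set(eng, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
//   // ...checkpoint `state`, then later:
//   ddesc.restore(eng, 0.5f, state, state_size, 1234ULL);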
inline ::dnnl::memory::data_type memory_desc_ext::to_dnnl_data_type(
dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline dpct::library_data_t memory_desc_ext::to_dpct_library_data_t(
::dnnl::memory::data_type dt, unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error(
"to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline ::dnnl::memory::format_tag memory_desc_ext::to_dnnl_format_tag(
dpct::library_data_t dt, memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int n, int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h,
int w, int n_stride, int c_stride,
int h_stride, int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline void memory_desc_ext::set(dpct::library_data_t dt, int ndims,
const int dims[], const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
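// Sketch: describing a dense float NCHW tensor of shape 1x3x224x224 with the
// tag-based overload above (the stride-based overload could express the same
// layout with explicit strides):
//
//   memory_desc_ext desc;
//   desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
//            1, 3, 224, 224);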
inline void memory_desc_ext::set(rnn_memory_format_tag tag,
dpct::library_data_t dt, int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if (tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
inline void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c,
int *h, int *w, int *n_stride, int *c_stride,
int *h_stride, int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline void memory_desc_ext::get(dpct::library_data_t *dt,
memory_format_tag *tag, int *n, int *c, int *h,
int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline void memory_desc_ext::get(dpct::library_data_t *dt,
rnn_memory_format_tag *tag, int *t, int *n,
int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] = astrides[index] / block_size;
}
}
inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 && adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
inline void engine_ext::get_rnn_configuration(
const ::dnnl::memory::desc &desc, rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt, ::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size, int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
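/// Allocate device memory on this engine's queue large enough to hold
/// count buffers of the size described by data_desc.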
inline void *engine_ext::allocate(const memory_desc_ext &data_desc,
int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
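/// Copy src to dst with zero elements transformed (used to implement the
/// mul_no_zeros reduction), dispatching on the element data type of desc.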
inline void engine_ext::transform_no_zero(const memory_desc_ext &desc,
void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
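/// Build the grouped weight descriptor for a group convolution: the group
/// count becomes a new leading dimension, the output-channel dimension is
/// divided by it, and a goihw/gohwi (or goidhw/godhwi) tag is chosen to
/// match the source layout.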
inline ::dnnl::memory::desc engine_ext::get_group_weight_desc(
int group_count, const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
    throw std::runtime_error(
        "get_group_weight_desc: group convolution with "
        "blocked weight memory is not implemented.");
}
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
  for (size_t index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
  } else if (ndims == 5) {
    if (is_nhwc) {
      tag = ::dnnl::memory::format_tag::godhwi;
    } else {
      tag = ::dnnl::memory::format_tag::goidhw;
    }
  } else {
    throw std::runtime_error("get_group_weight_desc: only 4d and 5d weight "
                             "memory descriptors are supported.");
  }
help_weight_desc = ::dnnl::memory::desc(
new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
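/// Fold all spatial dimensions into the channel dimension, yielding an
/// N x (C*H*W*...) x 1 x ... descriptor; blocked int8 layouts keep their
/// nChw4c/nChw32c tags.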
inline ::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
assert(ndims >= 4 && "ndims is at least 4.");
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
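/// Derive the 1-D scale/bias/mean/variance descriptor for batch
/// normalization: channel-sized in spatial mode, sized by all non-batch
/// dimensions in per-activation mode.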
inline ::dnnl::memory::desc engine_ext::get_bn_scale_bias_mean_var_desc(
const ::dnnl::memory::desc &desc, batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
assert(ndims >= 4 && "ndims is at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
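/// Return a channel-major (nchw/ncdhw) view of desc; blocked descriptors
/// are returned unchanged.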
inline ::dnnl::memory::desc
engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, the destination (dst = alpha * out +
/// beta * prior_dst) is unchanged. In that case this function returns true,
/// meaning the caller can skip executing the primitive entirely.
inline bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
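/// Derive both the scale/bias and the mean/variance descriptors for batch
/// normalization from the source descriptor in a single call.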
inline void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
    throw std::runtime_error(
        "derive_batch_normalization_memory_desc: only 4d "
        "and 5d memory descriptors are supported.");
  }
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
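/// Execute a cached primitive asynchronously. For each output argument with
/// beta != 0 the raw result is first written to a temporary buffer and then
/// blended into the user buffer as alpha * out + beta * prior_dst; all
/// temporaries are freed once the returned event completes.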
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
std::vector<void *> caches;
int output_arg_num = output_args.size();
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta,
output_args[i]._desc, output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
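/// Reorder a batch-normalization operand to channel-major format when its
/// layout requires it. A staging buffer is then allocated (and, for inputs,
/// filled from src) and recorded in caches for deferred release.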
inline ::dnnl::memory::desc
engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
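/// Shared implementation of the batch-normalization backward pass. Operands
/// are reordered to channel-major layouts where necessary, and if no saved
/// mean/variance is supplied, a forward-training pass is run first to
/// recompute the statistics.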
inline sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc,
bias, &reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias,
caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_var, &reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive = create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift,
forward_primitive);
void *dst_cache = nullptr;
if (!saved_mean && !saved_var) {
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
    if (!bias) {
      // reordered_bias is only allocated above when a bias pointer is
      // supplied, so allocate a zero-filled surrogate here before it is
      // passed to the forward pass below.
      reordered_bias = allocate(diff_scale_bias_desc);
      caches.push_back(reordered_bias);
      _q->memset(reordered_bias, 0, diff_scale_bias_desc.get_size());
    }
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean : saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
      {DNNL_ARG_DIFF_DST,
       {::dnnl::memory(help_diff_dst_desc, _eng,
                       reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias,
0.f, diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
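/// Shared implementation of the batch-normalization forward pass (inference
/// and training). In training mode the running mean/variance are updated as
/// an exponential moving average with the given factor, using the unbiased
/// variance estimate.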
inline sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean, &reordered_saved_mean,
caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_var, &reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive = create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean : saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var, 1.f,
mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
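/// Shared implementation of the RNN forward pass. When the *_query pointers
/// are supplied, only the required weight, workspace, and scratchpad sizes
/// are accumulated instead of executing the primitives.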
inline sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter,
dst_iter, src_iter_c, dst_iter_c,
weight, workspace, scratchpad};
std::vector<int> offset(6, 0);
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
    // To cover both the oneDNN bidirectional_sum and bidirectional_concat
    // configurations, execute_rnn_forward_primitive is called twice: once
    // with bidirectional_sum for the first layer_size - 1 layers, then once
    // with bidirectional_concat for the final layer.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
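/// Create (or fetch from the cache) and execute the oneDNN RNN forward
/// primitive for iter_num stacked invocations, packing weights, biases, and
/// workspace sequentially into the user buffers via the running offsets.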
inline sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD,
{::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
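/// Create and execute the oneDNN RNN backward primitive. The offsets walk
/// the buffers packed by the forward pass in reverse order, hence
/// insert_args below advances the offset before subtracting it.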
inline sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD,
{::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
  case ::dnnl::primitive::kind::batch_normalization:
    ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
    break;
  case ::dnnl::primitive::kind::reduction:
    ss << pd.get_p();
    break;
  case ::dnnl::primitive::kind::eltwise:
    ss << pd.get_alpha() << pd.get_beta();
    break;
  case ::dnnl::primitive::kind::lrn:
    ss << pd.get_k();
    break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
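/// Create a primitive of the given type (or fetch a matching one from the
/// primitive cache) and return the cache key together with the primitive
/// pointer.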
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc engine_ext::create_primitive_desc(
args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
inline void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline void engine_ext::sum(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace)
.wait();
}
inline void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline void engine_ext::softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline void engine_ext::softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline void engine_ext::lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc,
void *src, const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
  size_t mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
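/// Asynchronously reorder src into dst with scaling:
/// dst = alpha * reorder(src) + beta * prior_dst.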
inline sycl::event engine_ext::async_reorder(float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_scale(float alpha,
const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
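/// Asynchronously compute dst = alpha * src + beta * prior_dst with a oneDNN
/// sum primitive; the prior dst contents are snapshotted into a temporary
/// buffer so the in-place blend is well defined.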
inline sycl::event engine_ext::async_sum(float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
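/// Asynchronously compute dst = op(alpha_0 * src_0, alpha_1 * src_1) +
/// beta * prior_dst. The unary ops (sqrt, neg) are lowered to eltwise
/// primitives applied to src_0 only.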
inline sycl::event engine_ext::async_binary(
binary_op op, float alpha_0, const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1, void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
    // For neg, eltwise_linear with alpha = -1 and beta = 1 computes
    // output = 1 - input to simulate its behavior; for sqrt these two
    // parameters are ignored.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
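/// Asynchronously reduce src into dst as dst = alpha * reduce(src) +
/// beta * prior_dst. amax and mul_no_zeros are lowered to a preprocessing
/// pass (absolute value / zero transformation) into a temporary buffer
/// followed by a plain max / mul reduction.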
inline sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
  case reduction_op::amax: {
    // Braced scope so the local activation_desc does not cross the other
    // case labels. Take |x| into a temporary buffer, then reduce with max.
    cache = allocate(src_desc);
    activation_desc adesc;
    adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
    async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
    onednn_algorithm = ::dnnl::algorithm::reduction_max;
    src = cache;
    break;
  }
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_activation_forward(
activation_desc &desc, float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline sycl::event engine_ext::async_pooling_forward(
pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc = create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline sycl::event engine_ext::async_softmax_forward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc, help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline sycl::event engine_ext::async_lrn_forward(
lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
if (ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
inline sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
inline sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
if (workspace_size < dst_desc.get_desc().get_size()) {
throw std::runtime_error(
"async_batch_normalization_forward_training_ex: "
"no sufficient workspace.");
}
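    // The normalized, pre-activation result is written to the user-provided
    // workspace so that the matching backward call can reuse it.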
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc, saved_mean,
saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace, beta,
dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
throw std::runtime_error(
"async_batch_normalization_backward_ex: "
"no sufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace, 0.f, diff_dst_desc,
diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
inline sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
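  // Query the primitive with format_tag::any so oneDNN may choose optimal
  // memory layouts; src/weight are reordered into those layouts, and the
  // result is reordered back whenever the optimal dst layout differs from
  // the user-provided one.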
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS,
{::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}}, input_caches);
if (origin_dst_md != optimal_dst_md) {
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md,
dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
inline sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc,
dst);
}
inline sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
  if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto,
diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto,
src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst,
beta, diff_bias_desc, diff_bias);
}
inline void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr,
true, weight_space_size, nullptr, nullptr);
return;
}
inline void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr, nullptr);
}
inline sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
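  // For multi-layer bidirectional RNNs, the concat-direction layer is run
  // first and the remaining layers ping-pong between last_layer_cache and
  // hidden_layer_cache; diff_src is then copied from whichever cache holds
  // the final result (chosen by the parity of layer_size - 1).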
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(
diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache : hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline size_t engine_ext::get_dropout_state_size() {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if (_random_engine_state_size == -1) {
if (_q) {
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t engine_ext::get_dropout_workspace_size(
const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline sycl::event engine_ext::async_dropout_forward(
dropout_desc &desc, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
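  // Inverted dropout: the generated mask is scaled by 1/(1-p) so the expected
  // value of the output matches the input, with no rescale needed at
  // inference time.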
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
inline sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace,
size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
//==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "lib_common_utils.hpp"
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be
/// solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
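///
/// A minimal usage sketch (illustrative assumptions: USM mode, float data,
/// shared allocations, and the oneMKL scratchpad-size query):
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 4, lda = 4, ldb = 4;
/// float *a = sycl::malloc_shared<float>(lda * n, q);  // symmetric A
/// float *b = sycl::malloc_shared<float>(ldb * n, q);  // SPD B
/// float *w = sycl::malloc_shared<float>(n, q);        // eigenvalues out
/// int *info = sycl::malloc_shared<int>(1, q);
/// // ... fill a and b ...
/// std::int64_t ws_size = oneapi::mkl::lapack::sygvd_scratchpad_size<float>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
/// float *ws = sycl::malloc_device<float>(ws_size, q);
/// int ret = dpct::lapack::sygvd(q, 1, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
///                               w, ws, ws_size, info);
/// q.wait(); // ret == 0: w holds eigenvalues, a holds eigenvectors
/// \endcode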
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be
/// solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
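///
/// Usage mirrors the sygvd sketch above, with complex input and real
/// eigenvalues (illustrative; continues the variables q, n, lda, ldb, and
/// info from that sketch, and assumes sycl::float2 maps to
/// std::complex<float> through DataType):
/// \code
/// using cplx = sycl::float2;
/// cplx *a = sycl::malloc_shared<cplx>(lda * n, q);  // Hermitian A
/// cplx *b = sycl::malloc_shared<cplx>(ldb * n, q);  // HPD B
/// float *w = sycl::malloc_shared<float>(n, q);
/// std::int64_t ws_size =
///     oneapi::mkl::lapack::hegvd_scratchpad_size<std::complex<float>>(
///         q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
/// cplx *ws = sycl::malloc_device<cplx>(ws_size, q);
/// int ret = dpct::lapack::hegvd(q, 1, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
///                               w, ws, ws_size, info);
/// \endcode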
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda,
(Ty *)b, ldb, w, (Ty *)scratchpad,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
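///
/// Usage sketch (illustrative; assumes USM mode and builds a batch of two
/// pointers in shared memory):
/// \code
/// constexpr int n = 3, lda = 3, batch = 2;
/// float *a0 = sycl::malloc_shared<float>(lda * n, q);
/// float *a1 = sycl::malloc_shared<float>(lda * n, q);
/// // ... fill a0 and a1 with SPD matrices ...
/// float **a_array = sycl::malloc_shared<float *>(batch, q);
/// a_array[0] = a0; a_array[1] = a1;
/// int *info = sycl::malloc_shared<int>(batch, q);
/// int ret = dpct::lapack::potrf_batch(q, oneapi::mkl::uplo::lower, n,
///                                     a_array, lda, info, batch);
/// \endcode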
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
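///
/// Continuing the potrf_batch sketch above, solve A_i * X_i = B_i for each
/// factored matrix (illustrative; one right-hand side per system):
/// \code
/// float *b0 = sycl::malloc_shared<float>(n, q);
/// float *b1 = sycl::malloc_shared<float>(n, q);
/// float **b_array = sycl::malloc_shared<float *>(batch, q);
/// b_array[0] = b0; b_array[1] = b1;
/// int ret = dpct::lapack::potrs_batch(q, oneapi::mkl::uplo::lower, n,
///                                     /*nrhs=*/1, a_array, lda, b_array,
///                                     /*ldb=*/n, info, batch);
/// \endcode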
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info) dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
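// Illustrative call (hypothetical argument names; mirrors how the *_impl
// functors below are dispatched on the runtime element type):
//   lapack_shim<getrf_impl>(q, a_type, info, "getrf", q, m, n, a_type, a,
//                           lda, ipiv, device_ws, device_ws_size, info);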
template <typename T>
class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() { return _ptr; }
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
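// RAII usage pattern (as in getrs_impl and potrs_impl below): construct with
// an element count, hand get_memory() to the oneMKL call, then set_event()
// with the last event that reads the buffer so the destructor frees it only
// after that event completes.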
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
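// Example: 64 bytes of real_double converts to 8 elements, and 8 real_double
// elements back to 64 bytes; both helpers throw when the division is inexact.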
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
template <typename T>
struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T>
struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T>
struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T>
struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T>
struct ElementType {
using value_tpye = T;
};
template <typename T>
struct ElementType<std::complex<T>> {
using value_tpye = T;
};
template <typename T>
struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
    auto s_data = dpct::detail::get_memory(
        reinterpret_cast<typename ElementType<T>::value_type *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T>
struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T>
struct value_type_trait {
using value_type = T;
};
template <typename T>
struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T>
auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T>
struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
template <typename T>
constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T>
struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T>
struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T>
struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T>
struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T>
struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
inline oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T>
struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T>
struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T>
struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T>
struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T>
struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
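// Usage sketch (illustrative only, not part of this header): query the
// workspace size, then LU-factor a column-major m-by-n float matrix. Pointer
// names (dev_a, dev_ws, ...) are hypothetical; data is assumed to live in USM
// device memory and q is assumed to be an in-order queue.
//
//   std::size_t ws_bytes = 0;
//   getrf_scratchpad_size(q, m, n, library_data_t::real_float, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device(ws_bytes, q);
//   std::int64_t *ipiv = sycl::malloc_device<std::int64_t>(std::min(m, n), q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   getrf(q, m, n, library_data_t::real_float, dev_a, lda, ipiv, dev_ws,
//         ws_bytes, info);
//   q.wait();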
/// Solves a system of linear equations with a LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The input matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
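// Usage sketch (illustrative): solve A * X = B with the factors and pivots
// produced by getrf above. dev_b holds nrhs right-hand sides; names are
// hypothetical and memory is assumed to be USM device memory. No separate
// scratchpad query is needed; getrs allocates its own workspace.
//
//   getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//         library_data_t::real_float, dev_a, lda, ipiv,
//         library_data_t::real_float, dev_b, ldb, info);
//   q.wait();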
/// Computes the size of workspace memory of geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization
/// data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The array containing scalars that define the elementary
/// reflectors for the matrix Q in its decomposition as a product of
/// elementary reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
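// Usage sketch (illustrative): QR-factor a column-major m-by-n double matrix.
// dev_a and dev_tau are hypothetical USM device pointers; tau needs
// min(m, n) elements.
//
//   std::size_t ws_bytes = 0;
//   geqrf_scratchpad_size(q, m, n, library_data_t::real_double, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device(ws_bytes, q);
//   double *dev_tau = sycl::malloc_device<double>(std::min(m, n), q);
//   geqrf(q, m, n, library_data_t::real_double, dev_a, lda,
//         library_data_t::real_double, dev_tau, dev_ws, ws_bytes, info);
//   q.wait();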
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
*device_ws_size = device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten
/// according to \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
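// Usage sketch (illustrative): full SVD of a column-major m-by-n float
// matrix, A = U * S * VT. All pointers are hypothetical USM device memory;
// s needs min(m, n) elements, u is m-by-m and vt is n-by-n for
// jobu = jobvt = 'A'.
//
//   std::size_t ws_bytes = 0;
//   gesvd_scratchpad_size(q, 'A', 'A', m, n, library_data_t::real_float, lda,
//                         library_data_t::real_float, ldu,
//                         library_data_t::real_float, ldvt, &ws_bytes);
//   void *dev_ws = sycl::malloc_device(ws_bytes, q);
//   gesvd(q, 'A', 'A', m, n, library_data_t::real_float, dev_a, lda,
//         library_data_t::real_float, dev_s, library_data_t::real_float,
//         dev_u, ldu, library_data_t::real_float, dev_vt, ldvt, dev_ws,
//         ws_bytes, info);
//   q.wait();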
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten
/// according to \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  return detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
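// Usage sketch (illustrative): Cholesky-factor a symmetric positive-definite
// n-by-n double matrix and solve A * X = B. Pointer names are hypothetical
// USM device memory; potrs allocates its own workspace internally.
//
//   std::size_t ws_bytes = 0;
//   potrf_scratchpad_size(q, oneapi::mkl::uplo::lower, n,
//                         library_data_t::real_double, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device(ws_bytes, q);
//   potrf(q, oneapi::mkl::uplo::lower, n, library_data_t::real_double,
//         dev_a, lda, dev_ws, ws_bytes, info);
//   potrs(q, oneapi::mkl::uplo::lower, n, nrhs, library_data_t::real_double,
//         dev_a, lda, library_data_t::real_double, dev_b, ldb, info);
//   q.wait();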
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
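// Usage sketch (illustrative): eigenvalues of a real symmetric float matrix
// inside the interval [vl, vu]. vl/vu are host floats passed by address;
// m_found is a host variable (syheevx waits internally before returning).
// Device pointers (dev_a, dev_w, ...) are hypothetical; il/iu are ignored
// when range == rangev::values.
//
//   float vl = 0.0f, vu = 100.0f;
//   std::int64_t m_found = 0;
//   std::size_t ws_bytes = 0;
//   syheevx_scratchpad_size(q, oneapi::mkl::job::vec,
//                           oneapi::mkl::rangev::values,
//                           oneapi::mkl::uplo::upper, n,
//                           library_data_t::real_float, lda, &vl, &vu, 0, 0,
//                           library_data_t::real_float, &ws_bytes);
//   void *dev_ws = sycl::malloc_device(ws_bytes, q);
//   syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::values,
//           oneapi::mkl::uplo::upper, n, library_data_t::real_float, dev_a,
//           lda, &vl, &vu, 0, 0, &m_found, library_data_t::real_float, dev_w,
//           dev_ws, ws_bytes, info);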
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
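// Usage sketch (illustrative): the typed overload, which takes values rather
// than library_data_t tags. Here T = float and ValueT = float; the index
// range [il, iu] selects the il-th through iu-th smallest eigenvalues, and
// vl/vu are ignored for range == rangev::indices. Pointers are hypothetical.
//
//   int ws_elems = 0, m_found = 0;
//   syheevx_scratchpad_size<float, float>(q, oneapi::mkl::job::novec,
//       oneapi::mkl::rangev::indices, oneapi::mkl::uplo::lower, n, lda,
//       0.0f, 0.0f, il, iu, &ws_elems);
//   float *dev_ws = sycl::malloc_device<float>(ws_elems, q);
//   syheevx<float, float>(q, oneapi::mkl::job::novec,
//       oneapi::mkl::rangev::indices, oneapi::mkl::uplo::lower, n, dev_a,
//       lda, 0.0f, 0.0f, il, iu, &m_found, dev_w, dev_ws, ws_elems, info);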
/// Computes the size of workspace memory of sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syhegvx_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
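// Usage sketch (illustrative): generalized problem A*x = lambda*B*x
// (itype = 1) over an eigenvalue interval, with T = double. Pointer names
// are hypothetical USM device memory.
//
//   double vl = -1.0, vu = 1.0;
//   int ws_elems = 0, m_found = 0;
//   syhegvx_scratchpad_size<double, double>(q, 1, oneapi::mkl::job::vec,
//       oneapi::mkl::rangev::values, oneapi::mkl::uplo::upper, n, lda, ldb,
//       vl, vu, 0, 0, &ws_elems);
//   double *dev_ws = sycl::malloc_device<double>(ws_elems, q);
//   syhegvx<double, double>(q, 1, oneapi::mkl::job::vec,
//       oneapi::mkl::rangev::values, oneapi::mkl::uplo::upper, n, dev_a, lda,
//       dev_b, ldb, vl, vu, 0, 0, &m_found, dev_w, dev_ws, ws_elems, info);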
/// Computes the size of workspace memory of sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
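// Usage sketch (illustrative): all eigenvalues and eigenvectors of the
// generalized problem via divide and conquer, with T = float. Pointers are
// hypothetical USM device memory.
//
//   int ws_elems = 0;
//   syhegvd_scratchpad_size<float>(q, 1, oneapi::mkl::job::vec,
//       oneapi::mkl::uplo::lower, n, lda, ldb, &ws_elems);
//   float *dev_ws = sycl::malloc_device<float>(ws_elems, q);
//   syhegvd<float, float>(q, 1, oneapi::mkl::job::vec,
//       oneapi::mkl::uplo::lower, n, dev_a, lda, dev_b, ldb, dev_w, dev_ws,
//       ws_elems, info);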
/// Computes the size of workspace memory of syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
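// Usage sketch (illustrative): all eigenvalues of a complex Hermitian matrix.
// With T = std::complex<float>, the eigenvalues w are real (float). Pointers
// are hypothetical USM device memory.
//
//   int ws_elems = 0;
//   syheev_scratchpad_size<std::complex<float>>(q, oneapi::mkl::job::novec,
//       oneapi::mkl::uplo::upper, n, lda, &ws_elems);
//   auto *dev_ws = sycl::malloc_device<std::complex<float>>(ws_elems, q);
//   syheev<std::complex<float>, float>(q, oneapi::mkl::job::novec,
//       oneapi::mkl::uplo::upper, n, dev_a, lda, dev_w, dev_ws, ws_elems,
//       info);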
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
/// Computes the size of the workspace memory of the syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using the divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a lapack synchronous exception is caught, the value
/// returned by the info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
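// Example (illustrative sketch, not part of the API): querying the workspace
// size and computing the eigendecomposition of a hypothetical 4 x 4 float
// matrix `a` (column-major, USM-allocated) on a USM in-order queue.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 4, lda = 4;
//   float *a = sycl::malloc_shared<float>(lda * n, q); // symmetric input
//   float *w = sycl::malloc_shared<float>(n, q);       // eigenvalues
//   int *info = sycl::malloc_shared<int>(1, q);
//   int ws_size = 0;
//   dpct::lapack::syheevd_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_size);
//   float *ws = sycl::malloc_device<float>(ws_size, q);
//   dpct::lapack::syheevd<float, float>(q, oneapi::mkl::job::vec,
//                                       oneapi::mkl::uplo::upper, n, a, lda,
//                                       w, ws, ws_size, info);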
/// Computes the size of the workspace memory of the trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a lapack synchronous exception is caught, the value
/// returned by the info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
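// Example (illustrative sketch, not part of the API; `q`, `n`, `lda`, `a` and
// `info` are hypothetical and USM-allocated as in the example above; trtri
// requires the USM mode):
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::trtri_scratchpad_size(
//       q, oneapi::mkl::uplo::lower, oneapi::mkl::diag::nonunit, n,
//       dpct::library_data_t::real_double, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::lower,
//                       oneapi::mkl::diag::nonunit, n,
//                       dpct::library_data_t::real_double, a, lda, ws,
//                       ws_bytes, info);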
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <optional>
#include <sycl/sycl.hpp>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or
/// backward.
enum fft_direction : int { forward = 0, backward };
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Create the class for calculating 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
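// Example (illustrative sketch, not part of the API; queue and sizes are
// hypothetical): a 1-D single-precision complex-to-complex forward FFT.
//
//   sycl::queue q;
//   constexpr int N = 1024;
//   sycl::float2 *in = sycl::malloc_device<sycl::float2>(N, q);
//   sycl::float2 *out = sycl::malloc_device<sycl::float2>(N, q);
//   auto *engine = dpct::fft::fft_engine::create(
//       &q, N, dpct::fft::fft_type::complex_float_to_complex_float, 1);
//   engine->compute(in, out, dpct::fft::fft_direction::forward);
//   q.wait();
//   dpct::fft::fft_engine::destroy(engine);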
/// Create the class for calculating 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
/// Create the class for calculating 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for calculating n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for calculating FFT without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
/// Destroy the class for calculating FFT.
/// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if the
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if the
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int dim, int *n, int *inembed, int istride, int idist, int *onembed,
int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If it is not set, forward direction (if the current
/// FFT is complex-to-complex) and out-of-place (false) are set by default.
static void estimate_size(
int n1, fft_type type, int batch, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if the
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int n2, int n1, fft_type type, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if the
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int n3, int n2, int n1, fft_type type, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
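// Example (illustrative sketch, not part of the API; the direction/placement
// pair is hypothetical and must match the later commit/compute calls for the
// estimate to be valid):
//
//   size_t estimated_bytes = 0;
//   dpct::fft::fft_engine::estimate_size(
//       1024, dpct::fft::fft_type::real_float_to_complex_float, 1,
//       &estimated_bytes,
//       std::make_pair(dpct::fft::fft_direction::forward, false));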
#endif
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
/// Set the user's SYCL queue for calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
/// Set whether to use an external or internal workspace.
/// \param [in] flag True means using internal workspace. False means using
/// external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
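// Example of the external-workspace flow (illustrative sketch, not part of
// the API; `q` and the transform size are hypothetical; the workspace mode
// must be chosen before commit, and the workspace set after it):
//
//   auto *engine = dpct::fft::fft_engine::create();
//   engine->use_internal_workspace(false);
//   size_t ws_bytes = 0;
//   engine->commit(&q, 1024,
//                  dpct::fft::fft_type::complex_float_to_complex_float, 1,
//                  &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   engine->set_workspace(ws);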
#endif
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
private:
static std::pair<library_data_t, library_data_t> fft_type_to_data_type(
fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
}
}
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n) distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n) distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
      if (_q->get_device().is_gpu()) {                                         \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t>
void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
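    // For real <-> complex transforms, oneMKL stores the complex result in
    // the conjugate-even (CCE) layout: only n/2 + 1 complex elements are kept
    // along the innermost dimension. In-place transforms additionally pad the
    // real data to 2 * (n/2 + 1) scalars per innermost row so that the
    // complex result fits in the same buffer; the strides and distances
    // computed below encode exactly this packing for 1-D, 2-D and 3-D.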
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The complex domain descriptor needs different config values if the
// FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The real domain descriptor needs different config values if the
// FFT placement is different.
// Here we check the condition, and new config values are set and
// re-committed if needed.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
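/// Translates a raw pointer into the memory abstraction expected by oneMKL
/// calls: a dpct buffer in USM-none mode, or the pointer itself otherwise.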
template <typename T>
inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
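/// Copies a single scalar from the (possibly device) memory pointed to by
/// \p s to the host and returns it, mapping the pointee type through
/// DataType<T>::T2.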
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
} // namespace detail
enum class version_field : int { major, minor, update, patch };
/// Returns the requested field of Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
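// Example usage (illustrative sketch, not part of the original header):
//   int major = 0;
//   dpct::mkl_get_version(dpct::version_field::major, &major);
// Note that with the oneMKL Interfaces Project (__INTEL_MKL__ undefined) this
// call throws std::runtime_error instead of reporting a version.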
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
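// Example (illustrative): get_type_combination_id packs each 8-bit
// library_data_t value into a 64-bit key, first argument in the lowest byte.
// For (real_int32, real_int32, real_float) the key is
//   ((std::uint64_t)library_data_t::real_float << 16) |
//   ((std::uint64_t)library_data_t::real_int32 << 8) |
//   (std::uint64_t)library_data_t::real_int32
// so callers can match whole type combinations in a single switch statement.
// The table below lists the element size in bits for each library_data_t
// value.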
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "lib_common_utils.hpp"
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(),
*sparse_matrix_handle);
oneapi::mkl::sparse::trmv(
queue, info->get_uplo(), trans, info->get_diag(), alpha_value,
*sparse_matrix_handle, data_x, beta_value, data_y);
break;
}
default:
throw std::runtime_error(
"the spmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
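// Example usage (illustrative sketch, not part of the original header):
// computing y = alpha * A * x + beta * y for a general single-precision CSR
// matrix; queue, num_rows, num_cols, val, row_ptr, col_ind, x and y are
// assumed to be set up by the caller.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::ge);
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::csrmv(queue, oneapi::mkl::transpose::nontrans, num_rows,
//                       num_cols, &alpha, info, val, row_ptr, col_ind, x,
//                       &beta, y);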
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
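// Example usage (illustrative sketch): computing C = alpha * A * B + beta * C
// with a double-precision general CSR matrix A; b and c are interpreted as
// row-major dense matrices, matching the oneapi::mkl::layout::row_major used
// above.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::ge);
//   double alpha = 1.0, beta = 1.0;
//   dpct::sparse::csrmm(queue, oneapi::mkl::transpose::nontrans, sparse_rows,
//                       dense_cols, sparse_cols, &alpha, info, val, row_ptr,
//                       col_ind, b, ldb, &beta, c, ldc);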
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Stores the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows of the sparse matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse
/// matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr) return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
#endif
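// Example usage (illustrative sketch, requires __INTEL_MKL__): preparing a
// triangular solve for a lower-triangular CSR matrix; the optimizations are
// cached in opt_info for later use by the corresponding solve routine.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
//   info->set_uplo(oneapi::mkl::uplo::lower);
//   auto opt_info = std::make_shared<dpct::sparse::optimize_info>();
//   dpct::sparse::optimize_csrsv(queue, oneapi::mkl::transpose::nontrans,
//                                row_col, info, val, row_ptr, col_ind,
//                                opt_info);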
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num),
_col_num(col_num),
_leading_dim(leading_dim),
_value(value),
_value_type(value_type),
_layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
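// Example (illustrative sketch): wrapping existing single-precision data in
// descriptors; x_data, mat_data, n, rows, cols and ld are placeholders
// supplied by the caller.
//   auto x = std::make_shared<dpct::sparse::dense_vector_desc>(
//       n, x_data, dpct::library_data_t::real_float);
//   auto m = std::make_shared<dpct::sparse::dense_matrix_desc>(
//       rows, cols, ld, mat_data, dpct::library_data_t::real_float,
//       oneapi::mkl::layout::col_major);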
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix
class sparse_matrix_desc {
public:
  /// Constructor
/// \param [in] row_num Number of rows of the sparse matrix.
  /// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
  /// \param [in] value An array containing the non-zero elements of the
  /// sparse matrix.
  /// \param [in] row_ptr_type Data type of the \p row_ptr .
  /// \param [in] col_ind_type Data type of the \p col_ind .
  /// \param [in] base Indicates how input arrays are indexed.
  /// \param [in] value_type Data type of the \p value .
  /// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num),
_col_num(col_num),
_nnz(nnz),
_row_ptr(row_ptr),
_col_ind(col_ind),
_value(value),
_row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type),
_base(base),
_value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
  /// Add dependency for the destructor.
  /// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
  /// \param [out] value An array containing the non-zero elements of the
  /// sparse matrix.
  /// \param [out] row_ptr_type Data type of the \p row_ptr .
  /// \param [out] col_ind_type Data type of the \p col_ind .
  /// \param [out] base Indicates how input arrays are indexed.
  /// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
/// \param [out] format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [out] attribute The attribute type
/// \param [out] data The attribute value
/// \param [out] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse
/// matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t>
void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x +
/// beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies the operation applied to the input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a , \p x and \p y .
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta,
y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta,
y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
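// Example usage (illustrative sketch): the value type is selected at runtime
// through data_type; a, x and y are descriptors created by the caller.
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::spmv(queue, oneapi::mkl::transpose::nontrans, &alpha, a, x,
//                      &beta, y, dpct::library_data_t::real_float);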
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) +
/// beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies the operation applied to input matrix a.
/// \param [in] trans_b Specifies the operation applied to input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a , \p b and \p c .
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
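// Example usage (illustrative sketch): b and c must share the same layout or
// spmm throws.
//   double alpha = 1.0, beta = 0.0;
//   dpct::sparse::spmm(queue, oneapi::mkl::transpose::nontrans,
//                      oneapi::mkl::transpose::nontrans, &alpha, a, b, &beta,
//                      c, dpct::library_data_t::real_double);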
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <sycl/sycl.hpp>
#include <thread>
#include <vector>
#if defined(__linux__)
#include <sys/syscall.h>
#include <unistd.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy the memory pointed to by \p event.
///
/// \param event Pointer to the sycl::event address.
static void destroy_event(event_ptr event) { delete event; }
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
  /// Returns the maximum clock rate of the device's global memory in kHz. If
  /// the compiler does not support this API, the default value of 3200000 kHz
  /// is returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
  /// Returns the maximum bus width between the device and memory in bits. If
  /// the compiler does not support this API, the default value of 64 bits is
  /// returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char *name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void set_max_work_items_per_compute_unit(
int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void set_max_register_size_per_work_group(
int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) { _device_id = device_id; }
void set_uuid(std::array<unsigned char, 16> uuid) { _uuid = std::move(uuid); }
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
  /// Return the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
/// Get the number of bytes of free and total memory on the SYCL device.
  /// \param [out] free_memory The number of bytes of free memory on the SYCL
  /// device.
  /// \param [out] total_memory The number of bytes of total memory on the
  /// SYCL device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported."
<< std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported."
<< std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION < 20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where
// max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated
// by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0) prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message( \
"get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning \
"get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
// Estimates max register size per work group, feel free to update the value
// according to device properties.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
    // Guard the destruction of current_queues to make sure the ref count is
    // safe.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(
bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(
std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue *q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
/// Caller should acquire resource \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i])) break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.') break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
unsigned int dev_id = current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
auto it = _thread2dev_map.find(get_tid());
if (it != _thread2dev_map.end()) return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
_thread2dev_map[get_tid()] = id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for (auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device = sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu()) _cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
  /// thread id in _thread2dev_map, which means the default device should be
  /// used for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
/// Util function to get the default queue of the currently selected device,
/// depending on the USM config. Returns the default out-of-order queue when
/// USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of the current device in
/// dpct device manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of current
/// device in dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev) {
return dev_mgr::instance().get_device_id(dev);
}
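// Example usage (illustrative sketch): querying and selecting devices through
// the dpct device manager.
//   sycl::queue &q = dpct::get_default_queue();
//   dpct::device_ext &dev = dpct::get_current_device();
//   dpct::device_info props;
//   dev.get_device_info(props);
//   std::cout << props.get_name() << "\n";
//   dpct::select_device(0);  // bind device 0 to the calling thread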
/// Util function to check whether a device supports each of the given
/// sycl::aspect values; throws std::runtime_error for the first unsupported
/// aspect.
inline void has_capability_or_fail(
const sycl::device &dev, const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it)) continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
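// Example usage (illustrative sketch): verifying required aspects before
// launching a kernel that relies on double- and half-precision support.
//   dpct::has_capability_or_fail(q.get_device(),
//                                {sycl::aspect::fp64, sycl::aspect::fp16});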
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <cassert>
#include <cstdint>
#include <cstring>
#include <map>
#include <mutex>
#include <sycl/sycl.hpp>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include "device.hpp"
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
  void set_x(size_t x) { _x = x; }
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
  }
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
  }
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size) return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error(
"dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr) return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
  /// Check whether the pointer represents a device pointer.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
  /// Returns the memory manager singleton instance.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
  /// This padding may be set to some positive value to debug
  /// out-of-bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
      // or extra padding and the pointer points into this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension>
class accessor;
template <memory_region Memory, class T = byte_t>
class memory_traits {
public:
static constexpr sycl::access::target target = sycl::access::target::device;
static constexpr sycl::access_mode mode = (Memory == constant)
? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr, int value,
size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
/// Set \p value to the 3D memory region pointed to by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event> dpct_memset(sycl::queue &q,
pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, void *ptr,
size_t pitch, int val,
size_t x, size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction direction_table
[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] = {
{memcpy_direction::host_to_host, memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q,
from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size) return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Get the actual linear extent covered by the copy so that accessor ranges do
// not exceed the underlying allocation.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// Copy a 3D matrix specified by \p size from the 3D matrix specified by
/// \p from_ptr and \p from_range to another specified by \p to_ptr and
/// \p to_range.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range,
sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // events the free operation depends on
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // The padding in the target may hold meaningful data, so fill the
          // temp buffer with the current device contents first.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), to_surface,
buf.get_size(), device_to_host,
dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(), device_to_host,
dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)),
from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size, [=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size, [=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from,
sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id,
from_id, size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U>
struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const {
return _impl == other._impl;
}
bool operator!=(const usm_allocator &other) const {
return _impl != other._impl;
}
};
} // namespace deprecated
inline void dpct_free(void *ptr, const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr represents a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template <class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T>
static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr) return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class that contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
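/// Example: a hypothetical sketch of access_wrapper inside a command group.
/// This path assumes DPCT_USM_LEVEL_NONE (virtual pointers backed by
/// buffers); `dev_ptr` and the kernel body are assumptions.
/// \code
/// void *dev_ptr = dpct::dpct_malloc(16 * sizeof(float));
/// dpct::get_default_queue().submit([&](sycl::handler &cgh) {
///   dpct::access_wrapper<float *> acc(dev_ptr, cgh);
///   cgh.single_task([=] { acc.get_raw_pointer()[0] = 1.0f; });
/// });
/// \endcode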
/// Get the accessor for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode> get_access(const void *ptr,
sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
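/// Example: a minimal allocate/free round trip (hypothetical sketch; the
/// element count is arbitrary).
/// \code
/// float *d_data = (float *)dpct::dpct_malloc(1024 * sizeof(float));
/// dpct::dpct_free(d_data);
/// \endcode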
/// Get the host pointer from a buffer that is mapped to the virtual pointer
/// \p ptr.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T>
static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr = BufferOffset.first.get_host_access().get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data dpct_malloc(sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
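/// Example: a hypothetical sketch allocating a pitched 64x32x8-byte volume;
/// the dimensions are assumptions.
/// \code
/// dpct::pitched_data vol = dpct::dpct_malloc(sycl::range<3>(64, 32, 8));
/// size_t row_bytes = vol.get_pitch(); // x rounded up to a 32-byte multiple
/// dpct::dpct_free(vol.get_data_ptr());
/// \endcode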
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the device memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed by a batch of pointers in \p pointers which
/// are related to \p q after \p events completed.
///
/// \param pointers The pointers pointing to the device memory requested to be
/// freed.
/// \param events The events to be waited on.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
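/// Example: a hypothetical sketch deferring a free until an event completes;
/// the trivial kernel stands in for real work that uses \p p.
/// \code
/// sycl::queue q = dpct::get_default_queue();
/// void *p = dpct::dpct_malloc(256, q);
/// sycl::event ev = q.single_task([]() { /* work that uses p */ });
/// dpct::async_dpct_free({p}, {ev}, q);
/// \endcode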
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
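/// Example: a hypothetical host/device round trip using \a automatic
/// direction deduction; the array contents are assumptions.
/// \code
/// int host[4] = {1, 2, 3, 4};
/// void *dev = dpct::dpct_malloc(sizeof(host));
/// dpct::dpct_memcpy(dev, host, sizeof(host));            // host -> device
/// dpct::dpct_memcpy(host, dev, sizeof(host), dpct::device_to_host);
/// dpct::dpct_free(dev);
/// \endcode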
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
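/// Example: a hypothetical sketch copying a 4x3 matrix of ints between two
/// pitched allocations; all names and sizes are assumptions.
/// \code
/// size_t to_pitch, from_pitch;
/// void *dst = dpct::dpct_malloc(to_pitch, 4 * sizeof(int), 3);
/// void *src = dpct::dpct_malloc(from_pitch, 4 * sizeof(int), 3);
/// dpct::dpct_memcpy(dst, to_pitch, src, from_pitch, 4 * sizeof(int), 3);
/// \endcode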
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void async_dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
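/// Example: a hypothetical sketch copying a whole pitched 3D volume; the
/// 16x8x4 shape (x in bytes) is an assumption.
/// \code
/// sycl::range<3> size(16, 8, 4); // x bytes, y rows, z slices
/// dpct::pitched_data dst = dpct::dpct_malloc(size);
/// dpct::pitched_data src = dpct::dpct_malloc(size);
/// dpct::dpct_memcpy(dst, sycl::id<3>(0, 0, 0), src, sycl::id<3>(0, 0, 0),
///                   size);
/// \endcode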
/// Asynchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
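/// Example: a hypothetical sketch that zeroes a freshly allocated buffer; the
/// 512-byte size is an assumption.
/// \code
/// void *dev = dpct::dpct_malloc(512);
/// dpct::dpct_memset(dev, 0, 512); // blocks until the fill completes
/// dpct::dpct_free(dev);
/// \endcode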
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory size to set. \p pitch is the bytes in linear
/// dimension, including padding bytes. The function will return after the
/// memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y, sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory size to set. \p pitch is the bytes in linear
/// dimension, including padding bytes. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory size to set. The function will return after the
/// memset operation is completed.
///
/// \param pitch Specify the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val, sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory size to set. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param pitch Specify the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension>
class accessor;
template <class T, memory_region Memory>
class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory>
class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)),
_range(range_in),
_reference(false),
_host_ptr(nullptr),
_device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
    // Touch the mem_mgr and dev_mgr singletons so they are constructed before,
    // and therefore destructed after, objects of this class.
detail::mem_mgr::instance();
dev_mgr::instance();
}
/// Constructor with range
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference) dpct::dpct_free(_device_ptr);
if (_host_ptr) std::free(_host_ptr);
}
  /// Allocate memory with the default queue, and initialize it if an initial
  /// value is present.
void init() { init(dpct::get_default_queue()); }
  /// Allocate memory with the specified queue, and initialize it if an
  /// initial value is present.
void init(sycl::queue &q) {
if (_device_ptr) return;
if (!_size) return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
  /// Assign an existing device pointer to this variable.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
  /// Get the memory pointer of the memory object, which is a virtual pointer
  /// when USM is not used, and a device pointer when USM is used.
value_t *get_ptr() { return get_ptr(get_default_queue()); }
  /// Get the memory pointer of the memory object, which is a virtual pointer
  /// when USM is not used, and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type get_access(
sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
: _size(size),
_range(size / sizeof(T)),
_reference(true),
_device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(_size, q.get_device(),
q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
} // namespace detail
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
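/// Example: a hypothetical sketch of a 1-D constant_memory variable; the
/// name, extent, and initial values are assumptions. Migrated code typically
/// declares such variables at namespace scope.
/// \code
/// static dpct::constant_memory<float, 1> coeffs(sycl::range<1>(4),
///                                               {1.f, 2.f, 3.f, 4.f});
/// float *dev_coeffs = coeffs.get_ptr(); // allocates and uploads on first use
/// \endcode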
// dpct::deprecated:: is for functionality that was introduced for compatibility
// purposes, but relies on deprecated C++ features, which are either removed or
// will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator =
detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator =
detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
class pointer_attributes {
public:
void init(const void *ptr, sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type != sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type != sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device)
? ptr
: nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() { return memory_type; }
const void *get_device_pointer() { return device_pointer; }
const void *get_host_pointer() { return host_pointer; }
bool is_memory_shared() { return memory_type == sycl::usm::alloc::shared; }
unsigned int get_device_id() { return device_id; }
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
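/// Example: a hypothetical sketch querying a USM pointer; this requires USM
/// mode (DPCT_USM_LEVEL_NONE not defined), and `p` is an assumption.
/// \code
/// void *p = sycl::malloc_shared(64, dpct::get_default_queue());
/// dpct::pointer_attributes attrs;
/// attrs.init(p);
/// bool shared = attrs.is_memory_shared(); // true for malloc_shared memory
/// sycl::free(p, dpct::get_default_queue());
/// \endcode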
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include "dpl_extras/algorithm.h"
#include "dpl_extras/dpcpp_extensions.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/memory.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/vector.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T>
bool isnan(const T a) {
return sycl::isnan(a);
}
// TODO: Add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for a variable-length array.
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i) f += a[i] * a[i];
return sycl::sqrt(f);
}
}
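/// Example: a hypothetical sketch; the 3-element array is an assumption.
/// \code
/// float v[3] = {3.f, 4.f, 12.f};
/// float len = dpct::fast_length(v, 3); // approximately 13.f
/// \endcode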
/// Calculate the Euclidean length (square root of the sum of squares) of the
/// input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T>
inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i) ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool> compare_both(
const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool> unordered_compare_both(
const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs a 2-element comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T> compare(
const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element comparison; the compare result of each element is 0
/// (false) or 0xffff (true). Returns an unsigned int composed of the two
/// per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Performs a 2-element unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T> unordered_compare(
const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element unordered comparison; the compare result of each
/// element is 0 (false) or 0xffff (true). Returns an unsigned int composed of
/// the two per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, per element, whether a 2-element value is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
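/// Example: a hypothetical sketch of the mixed signed/unsigned overloads; the
/// values are assumptions. The signed argument is converted to unsigned, so
/// negative values wrap before the comparison.
/// \code
/// std::uint32_t u = 5u;
/// std::int32_t s = -1;
/// std::uint32_t m = dpct::min(u, s); // 5u: -1 wraps to 0xFFFFFFFF
/// \endcode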
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
/// Performs relu saturation.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T>
inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f) return 0.f;
return a;
}
template <class T>
inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs a complex multiply-add, computing a * b + c with each
/// sycl::vec<T, 2> packing a complex number as {real, imag}.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
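// Minimal usage sketch (illustrative):
#if 0
inline void complex_mul_add_example() {
  sycl::vec<float, 2> a{1.0f, 2.0f};  // 1 + 2i
  sycl::vec<float, 2> b{3.0f, 4.0f};  // 3 + 4i
  sycl::vec<float, 2> c{0.5f, 0.5f};  // 0.5 + 0.5i
  // (1 + 2i) * (3 + 4i) + (0.5 + 0.5i) = -4.5 + 10.5i
  auto r = dpct::complex_mul_add(a, b, c);
  (void)r;
}
#endif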
/// Compares two values and returns the bigger one. If either input is NaN,
/// then NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T>
inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b)) return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either input is NaN,
/// then NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T>
inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b)) return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
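// Behavioral note (illustrative sketch): unlike sycl::fmin/fmax, which
// ignore a NaN operand, fmin_nan/fmax_nan propagate NaN.
#if 0
inline void fmin_nan_example() {
  float x = NAN, y = 1.0f;
  float a = sycl::fmin(x, y);      // 1.0f: the NaN operand is ignored
  float b = dpct::fmin_nan(x, y);  // NaN: the NaN operand is propagated
  (void)a;
  (void)b;
}
#endif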
/// A sycl::abs wrapper functor.
struct abs {
template <typename T>
auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
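// Usage sketch (illustrative): treat each 32-bit value as two packed 16-bit
// lanes and apply a saturating add lane by lane. On a little-endian target
// the low 16 bits form lane 0.
#if 0
inline void vectorized_binary_example() {
  unsigned a = 0x7FFF0001;  // lanes {0x0001, 0x7FFF}
  unsigned b = 0x00010001;  // lanes {0x0001, 0x0001}
  // Lane 1 saturates at 0x7FFF, lane 0 becomes 0x0002 => r == 0x7FFF0002.
  unsigned r =
      dpct::vectorized_binary<sycl::vec<short, 2>>(a, b, dpct::add_sat());
  (void)r;
}
#endif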
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater than of the two values
template <typename S, typename T>
inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T>
inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T>
inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the absolute differences of two values without modulo
/// overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
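// Usage sketch (illustrative): a 4x8-bit sum-of-absolute-differences over
// values treated as sycl::vec<std::uint8_t, 4>.
#if 0
inline void sum_abs_diff_example() {
  unsigned a = 0x01020304;
  unsigned b = 0x04030201;
  // Per-byte |diff| = {3, 1, 1, 3}; the returned sum is 8.
  unsigned r = dpct::vectorized_sum_abs_diff<sycl::vec<std::uint8_t, 4>>(a, b);
  (void)r;
}
#endif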
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/blas_utils.hpp | //==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include <thread>
#include <utility>
#include <vector>
#include "lib_common_utils.hpp"
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
/// Get the value of \p s.
/// Copy the data to host synchronously, then return the data.
/// \param [in] s The pointer to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array) sycl::free(p, *exec_queue);
}
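// Returns num_elems rounded up to the next multiple of mem_align_in_elems,
// e.g. stride_for(100, 32) == 128.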
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template <typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced) _temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced) return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy,
res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy,
res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy,
res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
std::uint64_t key =
detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(q, n, reinterpret_cast<const float *>(x),
incx, reinterpret_cast<const float *>(y),
incy, reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val, data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val, data_x, incx, data_y,
incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx, data_y, incy,
c_value, s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(q, a_trans, b_trans, m, n, k,
alpha_value, data_a, lda, data_b, ldb,
beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, stride_a, data_b,
ldb, stride_b, beta_value, data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
  // For a symmetric matrix, this function performs:
  //   C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs:
  //   C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C,
  // so OPB needs to be adjusted before we call gemmt().
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> &&
trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a conjugate (non-transposing) operation,
    // but only nontrans, trans and conjtrans are available. So we do a
    // conjtrans copy first, then request a trans operation in gemmt().
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer =
sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, new_B_buffer,
origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, beta_value,
data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(q, uplo, trans_A, trans_B, n, k,
alpha_value, data_a, lda, data_b,
ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, int lda,
void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info),
uplo_info(uplo_info),
transpose_info(transpose_info),
diag_info(diag_info),
value_info(value_info),
groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by their lower triangular factors (with unit diagonal
/// elements) and their upper triangular factors.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda,
int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size,
scratchpad, scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
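// Minimal usage sketch (illustrative; assumes a queue with USM support and
// shared allocations so both host and device can touch the data):
#if 0
inline void getrf_batch_example(sycl::queue &q) {
  constexpr int n = 4, batch = 2;
  float *mats[batch];
  for (int i = 0; i < batch; ++i)
    mats[i] = sycl::malloc_shared<float>(n * n, q);
  // ... fill mats[i] with column-major data ...
  int *ipiv = sycl::malloc_shared<int>(batch * n, q);
  int *info = sycl::malloc_shared<int>(batch, q);
  dpct::getrf_batch_wrapper(q, n, mats, /*lda=*/n, ipiv, info, batch);
  q.wait();
}
#endif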
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer,
ldb, stride_b, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue
.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
})
.wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared,
b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
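// Continuation sketch (illustrative): after getrf_batch_wrapper, solve
// A_i * X_i = B_i for every matrix in the batch; b holds the right-hand
// sides on input and the solutions on output.
#if 0
inline void getrs_batch_example(sycl::queue &q, const float *a[], int *ipiv,
                                float *b[], int n, int nrhs, int batch) {
  int info = 0;
  dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs, a,
                            /*lda=*/n, ipiv, b, /*ldb=*/n, &info, batch);
  q.wait();
}
#endif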
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n, const T *a[],
int lda, int *ipiv, T *b[], int ldb, int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b,
ipiv_buf, stride_ipiv, batch_size,
scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared,
b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
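// Usage sketch (illustrative): invert LU-factored matrices; a stays intact
// and b receives the inverses.
#if 0
inline void getri_batch_example(sycl::queue &q, const float *a[], int *ipiv,
                                float *b[], int n, int batch) {
  int *info = sycl::malloc_shared<int>(batch, q);
  dpct::getri_batch_wrapper(q, n, a, /*lda=*/n, ipiv, b, /*ldb=*/n, info,
                            batch);
  q.wait();
}
#endif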
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalars.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n, T *a[],
int lda, T *tau[], int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size =
oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size,
scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
(Ty **)tau_shared, 1, &group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
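// Usage sketch (illustrative): QR-factor a batch of m x n matrices; tau[i]
// receives min(m, n) scalars per matrix.
#if 0
inline void geqrf_batch_example(sycl::queue &q, float *a[], float *tau[],
                                int m, int n, int batch) {
  int info = 0;
  dpct::geqrf_batch_wrapper(q, m, n, a, /*lda=*/m, tau, &info, batch);
  q.wait();
}
#endif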
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(q, n, x, incx, result);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
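// Usage sketch (illustrative; x_dev is device-accessible USM memory and the
// queue is in-order). When the result pointer is a plain host pointer, the
// wrapper stages the scalar result through temporary memory internally.
#if 0
inline void nrm2_example(sycl::queue &q, const float *x_dev, int n) {
  float result = 0.f;
  dpct::nrm2(q, n, x_dev, dpct::library_data_t::real_float, /*incx=*/1,
             &result, dpct::library_data_t::real_float);
  q.wait();
}
#endif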
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
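// Usage sketch (illustrative): conjugated dot product of two complex
// vectors, with result_dev pointing at device-accessible memory.
#if 0
inline void dotc_example(sycl::queue &q, const std::complex<float> *x,
                         const std::complex<float> *y, int n,
                         std::complex<float> *result_dev) {
  dpct::dotc(q, n, x, dpct::library_data_t::complex_float, /*incx=*/1, y,
             dpct::library_data_t::complex_float, /*incy=*/1, result_dev,
             dpct::library_data_t::complex_float);
}
#endif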
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
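// Usage sketch (illustrative): scale a device vector in place, x = alpha * x.
#if 0
inline void scal_example(sycl::queue &q, float *x_dev, int n) {
  const float alpha = 0.5f;
  dpct::scal(q, n, &alpha, dpct::library_data_t::real_float, x_dev,
             dpct::library_data_t::real_float, /*incx=*/1);
}
#endif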
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x,
library_data_t x_type, int incx, void *y,
library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y,
                                              incy);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
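// Usage sketch (illustrative): y = alpha * x + y in double precision, with
// x_dev and y_dev device-accessible.
#if 0
inline void axpy_example(sycl::queue &q, const double *x_dev, double *y_dev,
                         int n) {
  const double alpha = 2.0;
  dpct::axpy(q, n, &alpha, dpct::library_data_t::real_double, x_dev,
             dpct::library_data_t::real_double, /*incx=*/1, y_dev,
             dpct::library_data_t::real_double, /*incy=*/1);
}
#endif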
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type, int incx,
void *y, library_data_t y_type, int incy, const void *c,
const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(
q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(
q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
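// Usage sketch (illustrative): apply a Givens rotation with c = cos(theta),
// s = sin(theta) to two single-precision vectors.
#if 0
inline void rot_example(sycl::queue &q, float *x, float *y, int n) {
  const float c = 0.8f, s = 0.6f;  // c^2 + s^2 == 1
  dpct::rot(q, n, x, dpct::library_data_t::real_float, /*incx=*/1, y,
            dpct::library_data_t::real_float, /*incy=*/1, &c, &s,
            dpct::library_data_t::real_float);
}
#endif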
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half,
c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb,
&beta_float, c, ldc);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
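// Illustrative usage sketch (not part of the header API): a single-precision
// GEMM on USM device pointers. `q`, `m`, `n`, `k`, `lda`, `ldb`, `ldc` and
// the device allocations `d_a`, `d_b`, `d_c` are assumed to be set up by the
// caller; matrices are column-major, as in standard BLAS.
//
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm(q, oneapi::mkl::transpose::nontrans,
//              oneapi::mkl::transpose::nontrans, m, n, k, &alpha, d_a,
//              dpct::library_data_t::real_float, lda, d_b,
//              dpct::library_data_t::real_float, ldb, &beta, d_c,
//              dpct::library_data_t::real_float, ldc,
//              dpct::library_data_t::real_float);
//   q.wait(); // the computation is asynchronous; synchronize before use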
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations
/// to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
float, float>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb,
&beta_float, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half,
c, ldc, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
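// Illustrative usage sketch (not part of the header API): the pointer-array
// overload takes per-matrix pointer arrays. `a_ptrs`, `b_ptrs`, `c_ptrs` are
// assumed to be device-accessible arrays of `batch` pointers each (USM only;
// see the DPCT_USM_LEVEL_NONE guard above).
//
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    a_ptrs, dpct::library_data_t::real_float, lda,
//                    b_ptrs, dpct::library_data_t::real_float, ldb, &beta,
//                    c_ptrs, dpct::library_data_t::real_float, ldc,
//                    batch, dpct::library_data_t::real_float);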
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations
/// to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
float, float>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb,
stride_b, &beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
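// Illustrative usage sketch (not part of the header API): the strided
// overload multiplies `batch` matrices stored contiguously, one stride apart,
// in a single call. The strides shown assume non-transposed, column-major
// operands.
//
//   long long stride_a = static_cast<long long>(lda) * k;
//   long long stride_b = static_cast<long long>(ldb) * n;
//   long long stride_c = static_cast<long long>(ldc) * n;
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha, d_a,
//                    dpct::library_data_t::real_float, lda, stride_a, d_b,
//                    dpct::library_data_t::real_float, ldb, stride_b, &beta,
//                    d_c, dpct::library_data_t::real_float, ldc, stride_c,
//                    batch, dpct::library_data_t::real_float);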
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or
/// lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of the matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b, ldb,
beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or
/// lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of the matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b, ldb,
beta, c, ldc);
}
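// Illustrative usage sketch (not part of the header API): a symmetric rank-k
// update accumulating into the lower triangle of C, with device allocations
// `d_a`, `d_b`, `d_c` assumed to be prepared by the caller.
//
//   float alpha = 1.0f, beta = 1.0f;
//   dpct::syrk(q, oneapi::mkl::uplo::lower,
//              oneapi::mkl::transpose::nontrans, n, k, &alpha, d_a, lda,
//              d_b, ldb, &beta, d_c, ldc);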
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
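// Illustrative usage sketch (not part of the header API): solving
// op(A_i) * X_i = alpha * B_i for a batch of lower-triangular systems; each
// B_i is overwritten with its solution X_i. `a_ptrs` and `b_ptrs` are assumed
// to be device-accessible arrays of `batch` pointers each.
//
//   float alpha = 1.0f;
//   dpct::trsm_batch(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//                    oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::diag::nonunit, m, n, &alpha, a_ptrs,
//                    dpct::library_data_t::real_float, lda, b_ptrs,
//                    dpct::library_data_t::real_float, ldb, batch,
//                    dpct::library_data_t::real_float);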
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in] b Input matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [out] c Output matrices C.
/// \param [in] ldc Leading dimension of the matrices C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
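// Illustrative usage sketch (not part of the header API): C = alpha * A * B
// with A lower triangular. Because b != c in this call, B is first copied
// into C and the triangular product is then applied in place on C.
//
//   float alpha = 1.0f;
//   dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              m, n, &alpha, d_a, lda, d_b, ldb, d_c, ldc);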
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
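// Illustrative usage sketch (not part of the header API): incrementing a
// zero-initialized USM counter from every work-item; `count` is assumed to be
// allocated with sycl::malloc_device<unsigned int>(1, q) and set to 0
// beforehand.
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::id<1>) {
//     dpct::atomic_fetch_add(count, 1u); // relaxed order, device scope
//   }).wait();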
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value
/// at the addr and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise AND operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value
/// at the addr and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise AND operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise OR operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise OR operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise XOR operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in the bitwise XOR operation with the
/// value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value
/// operand and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value
/// operand and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value
/// operand and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value
/// operand and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value
/// stored in \p addr is equal to zero or greater than \p operand; otherwise
/// decrement the value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm =
sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(
addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand)) break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if old value stored in \p
/// addr is less than \p operand, else set 0 to the value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm =
sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(
addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0)) break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if old value stored in \p
/// addr is less than \p operand, else set 0 to the value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
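// Illustrative semantics sketch (not part of the header API), mirroring the
// CUDA-style atomicInc/atomicDec wrap-around behavior. With *addr == 5:
//
//   dpct::atomic_fetch_compare_inc(addr, 7u); // stores 6, returns 5
//   dpct::atomic_fetch_compare_inc(addr, 6u); // old >= operand: stores 0
//
// and dpct::atomic_fetch_compare_dec(addr, 7u) with *addr == 0 stores the
// operand 7 rather than wrapping below zero.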
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// it with the value desired if the value at \p addr is equal to the value
/// expected.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is
/// expected.
/// \param success The memory ordering used when the comparison succeeds.
/// \param fail The memory ordering used when the comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// it with the value desired if the value at \p addr is equal to the value
/// expected.
/// \param [in, out] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is
/// expected.
/// \param success The memory ordering used when the comparison succeeds.
/// \param fail The memory ordering used when the comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
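// Illustrative usage sketch (not part of the header API): a classic
// compare-and-swap retry loop that atomically doubles *addr using the
// raw-pointer overload above. The returned value equals `observed` exactly
// when the exchange succeeded.
//
//   unsigned int observed = *addr;
//   unsigned int prev;
//   while ((prev = dpct::atomic_compare_exchange_strong(
//               addr, observed, observed * 2u)) != observed)
//     observed = prev; // lost the race; retry with the freshly read value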
/// Atomic extension to implement standard APIs in std::atomic
namespace detail {
template <typename T>
struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic {
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope,
Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope,
Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
/// Constructor with initialize value.
  constexpr atomic(T d) noexcept : __d(d) {}
/// atomically replaces the value of the referenced object with a non-atomic
/// argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the
/// value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with a non-atomic
/// argument and performs an atomic exchange if equal or an atomic load if not
/// \param expected The value expected to be found in the object referenced by
/// the atomic_ref object
/// \param desired The value to store in the referenced object if it is as
/// expected
/// \param success The memory model for the read-modify-write operation
/// \param failure The memory model for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_weak(
T &expected, T desired, sycl::memory_order success,
sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure,
memoryScope);
}
/// atomically compares the value of the referenced object with a non-atomic
/// argument and performs an atomic exchange if equal or an atomic load if not
/// \param expected The value expected to be found in the object referenced by
/// the atomic_ref object
/// \param desired The value to store in the referenced object if it is as
/// expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder,
memoryScope);
}
/// atomically compares the value of the referenced object with a non-atomic
/// argument and performs an atomic exchange if equal or an atomic load if not
/// \param expected The value expected to be found in the object referenced by
/// the atomic_ref object
/// \param desired The value to store in the referenced object if it is as
/// expected
/// \param success The memory model for the read-modify-write operation
/// \param failure The memory model for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_strong(
T &expected, T desired, sycl::memory_order success,
sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure,
memoryScope);
}
/// atomically compares the value of the referenced object with a non-atomic
/// argument and performs an atomic exchange if equal or an atomic load if not
/// \param expected The value expected to be found in the object referenced by
/// the atomic_ref object
/// \param desired The value to store in the referenced object if it is as
/// expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder,
memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and
/// obtains the value held previously
/// \param operand The other argument of the arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic
/// object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1>,
/// oneapi::mkl::rng::device::mrg32k3a<4>,
/// oneapi::mkl::rng::device::philox4x32x10<1>,
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t>
class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
/// \param [in] num_to_skip Set the number of elements that need to be
/// skipped. The number is calculated as: num_to_skip[0] + num_to_skip[1] *
/// 2^64 + num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
/// \param [in] num_to_skip Set the number of elements that need to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Generate random number(s) that obey the distribution \tparam distr_t.
/// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
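// Example (illustrative sketch): drawing four uniform floats per work-item
// inside a SYCL kernel. The seed and the per-item skip-ahead offset
// (item_id * 4, with item_id assumed to be a std::uint64_t work-item index)
// are assumptions chosen for this demo, not requirements.
//   rng_generator<oneapi::mkl::rng::device::philox4x32x10<4>> gen(
//       1234 /*seed*/, {item_id * 4});
//   sycl::vec<float, 4> v =
//       gen.generate<oneapi::mkl::rng::device::uniform<float>, 4>();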
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  default:
    throw std::runtime_error(
        "The oneAPI Math Kernel Library (oneMKL) "
        "Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
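// Example (illustrative sketch): creating a philox host generator and filling
// `n` gaussian floats. `out` is assumed to be a pointer accessible to the
// generator's queue (e.g. USM memory allocated on the default queue).
//   host_rng_ptr gen = create_host_rng(random_engine_type::philox4x32x10);
//   gen->set_seed(42);
//   gen->generate_gaussian(out, n, 0.0f, 1.0f);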
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
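// Example (illustrative sketch): a dot product computed with a oneDPL device
// policy. The queue `q` and the device-accessible ranges `a` and `b` are
// assumptions for this sketch.
//   auto policy = oneapi::dpl::execution::make_device_policy(q);
//   float dot =
//       dpct::inner_product(policy, a.begin(), a.end(), b.begin(), 0.0f);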
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using make_transform_output_iterator(). Used to apply the supplied
// transform function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc>
class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function may
// be applied on write, resulting in a transform_output_iterator
template <typename _UnaryFunc>
struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T>
auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp>
class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
// There is no storage behind the iterator, so we return a value instead of a
// reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
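// Example (illustrative): a constant_iterator behaves like an endless
// sequence of one repeated value, useful as a broadcast input range.
//   auto c = dpct::make_constant_iterator(42);
//   // *c == 42 and c[100] == 42; advancing only moves the internal counter.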
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp>
class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp>
struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp> operator()(
const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T>
struct __zip_iterator_impl;
template <class... Ts>
struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept std::tuple type as template argument for
// compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to
// pass iterator's types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
// signal to __get_sycl_range that this iterator is treated as a direct pass
// iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator operator+(
difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
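// Example (illustrative): pairing each element with its index. The array
// `data` is an assumption for this sketch.
//   int data[] = {7, 8, 9};
//   dpct::arg_index_input_iterator<int *> it(data);
//   auto kv = *it;  // kv.key == 0, kv.value == 7
//   ++it;
//   kv = *it;       // kv.key == 1, kv.value == 8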
template <typename IterT>
struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
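// Example (illustrative): ping-pong buffering for multi-pass algorithms.
// `buf_a` and `buf_b` are assumptions for this sketch.
//   dpct::io_iterator_pair<int *> bufs(buf_a, buf_b);
//   // A pass reads from bufs.first() and writes to bufs.second();
//   bufs.swap(); // the two roles are exchanged for the next pass.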
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(
std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type,
Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(
std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type,
Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1> remove_if(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
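// Example (illustrative): collapsing consecutive equal keys while keeping the
// first value of each run. `policy`, `keys` and `vals` are assumptions.
//   // keys: {1, 1, 2, 2, 3}, vals: {10, 11, 20, 21, 30}
//   auto ends = dpct::unique(policy, keys.begin(), keys.end(), vals.begin());
//   // keys now begin {1, 2, 3} and vals begin {10, 20, 30}; ends.first and
//   // ends.second point one past the last kept key/value.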
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
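// Example (illustrative): permuting values through an index map. All names
// are assumptions for this sketch.
//   // map: {2, 0, 1}, input: {10, 20, 30}  ->  result: {30, 10, 20}
//   dpct::gather(policy, map.begin(), map.end(), input.begin(),
//                result.begin());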
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(
policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(
policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
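// Example (illustrative): merging two sorted key ranges while carrying their
// values along. All names are assumptions for this sketch.
//   // A: keys {1, 3}, vals {10, 30};  B: keys {2, 4}, vals {20, 40}
//   dpct::merge(policy, ka.begin(), ka.end(), kb.begin(), kb.end(),
//               va.begin(), vb.begin(), k_out.begin(), v_out.begin());
//   // k_out: {1, 2, 3, 4}, v_out: {10, 20, 30, 40}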
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
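// Example (illustrative): filling a range with a strided ramp. `policy` and
// `v` are assumptions for this sketch.
//   dpct::iota(policy, v.begin(), v.end(), 5, 2); // 5, 7, 9, 11, ...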
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
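// Example (illustrative): a key-value sort where values follow their keys.
// All names are assumptions for this sketch.
//   // keys: {3, 1, 2}, vals: {'c', 'a', 'b'}
//   dpct::sort(policy, keys.begin(), keys.end(), vals.begin());
//   // keys: {1, 2, 3}, vals: {'a', 'b', 'c'}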
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
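// Note (illustrative addition): despite its name, for_each_index assigns
// through the range, i.e. first[i] = unary_op(i) for each i in
// [0, last - first). A minimal sketch with hypothetical names:
//
//   // out[i] = i * i
//   dpct::for_each_index(policy, oneapi::dpl::begin(out),
//                        oneapi::dpl::end(out),
//                        [](auto i) { return i * i; });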
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result,
Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
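// Worked example for set_intersection above (illustrative addition): with
// keys_1 = {1, 3, 5}, values_1 = {a, b, c} and keys_2 = {3, 5, 7}, the zipped
// std::set_intersection copies only the matching keys together with their
// first-range values, so keys_result receives {3, 5} and values_result
// receives {b, c}; the returned pair points one past the last elements
// written.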
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6> set_symmetric_difference(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_symmetric_difference(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
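// Worked example for the masked stable_partition_copy above (illustrative
// addition): with first..last = {10, 20, 30, 40}, mask = {1, 0, 1, 0} and
// p(m) = (m != 0), the zipped partition_copy writes {10, 30} through out_true
// and {20, 40} through out_false (order preserved), and the returned pair
// holds the ends of the two output sequences.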
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1> stable_partition(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1> partition(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms key to a specific bit range and sorts the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort specialization from being used,
  // which costs some performance. It is necessary here, however, to apply
  // the key transformation to the desired bit range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A comparison operator other than std::greater() preserves the
      // relative order of -0.0 and 0.0, at the cost of some performance
      // because radix sort will not be used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A comparison operator other than std::less() preserves the relative
      // order of -0.0 and 0.0, at the cost of some performance because radix
      // sort will not be used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms the key of each pair to a specific bit range and sorts the
// pairs by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort specialization from being used,
  // which costs some performance. It is necessary here, however, to apply
  // the key transformation to the desired bit range and to select the key
  // from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort specialization from being used,
  // which costs some performance. It is necessary here, however, to select
  // the key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
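// Worked example of the bit-range dispatch above (illustrative addition): for
// a 32-bit key with begin_bit = 4 and end_bit = 12, the clipped range spans
// 8 bits, so num_bytes = (12 - 4 - 1) / 8 + 1 = 1 and the pairs are sorted on
// a uint8_t-transformed key. Only when the clipped range covers all 32 bits
// is the transform skipped entirely (sort_only_pairs).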
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer whose contents are discarded. The
  // memory footprint could be reduced with a specialized iterator holding a
  // single, unchanging dummy key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void mark_segments(_ExecutionPolicy &&policy,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n,
int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size is larger than a work-group, use the
    // work-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size is larger than half a sub-group, use the
    // sub-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to a sub-group, use a
    // single work item to mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
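// Worked example for mark_segments above (illustrative addition): with n = 6,
// nsegments = 2, begin_offsets = {0, 4} and end_offsets = {3, 6}, the result
// is segments = {0, 0, 0, ?, 1, 1}; index 3 lies in no segment and its slot
// is left unwritten. The three kernel variants differ only in how work items
// are assigned to segments.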
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by keys, keeping track of which segment each element was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Stable-sort by segment id to restore segment order; keys remain
  // sorted within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
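// Worked example of the two-pass trick above (illustrative addition): with
// keys = {5, 2, 9, 1} in segments {0, 0, 1, 1}, pass 1 sorts the keys
// globally while carrying segment ids along: keys_temp = {1, 2, 5, 9},
// segments_sorted = {1, 0, 0, 1}. Pass 2 sorts by segment id; assuming equal
// segment ids keep their pass-1 order (the stable sort noted above), the
// output is {2, 5, 1, 9}, i.e. each segment is sorted in place.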
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by keys, keeping track of which segment each element was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Stable-sort by segment id to restore segment order; keys remain
  // sorted within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
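// Usage sketch for the io_iterator_pair overload above (illustrative
// addition): the pair implements a double-buffer pattern, holding an input
// and an output iterator. With do_swap_iters = true the iterators are
// swapped after the sort so that .first() refers to the sorted data and
// calls can be chained:
//
//   dpct::sort_pairs(policy, keys, values, n,
//                    /*descending=*/false, /*do_swap_iters=*/true);
//   // keys.first() / values.first() now view the sorted sequence.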
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters) keys.swap();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the device's compute units (scaled by sub-group size
  // on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) {
    // Use a host-side loop of parallel sorts when the total number of sorts
    // is small, to limit overall overhead.
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else {
    // Reasonable catch-all: two full device-wide sorts.
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the device's compute units (scaled by sub-group size
  // on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) {
    // Use a host-side loop of parallel sorts when the total number of sorts
    // is small, to limit overall overhead.
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else {
    // Reasonable catch-all: two full device-wide sorts.
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
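// Usage sketch for reduce_argmax above (illustrative addition): `output` must
// point to a single element able to hold the index/value pair produced by
// dpct::arg_index_input_iterator; after the call it holds the position of the
// first maximum and the maximum value itself. `in_ptr` and `out_ptr` are
// hypothetical names:
//
//   dpct::reduce_argmax(policy, in_ptr, out_ptr, n);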
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value,
StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
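// Worked example for equal_range above (illustrative addition): on a sorted
// range {1, 2, 2, 2, 3} with value = 2, lower_bound yields offset 1 and
// upper_bound offset 4, so the returned pair brackets the three 2s:
//
//   auto r = dpct::equal_range(policy, first, first + 5, 2);
//   // r.first - first == 1, r.second - first == 4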
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable {
using type = T;
};
template <>
struct make_allocatable<void> {
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T>
class device_pointer;
#endif
template <typename T>
struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
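// Usage sketch for device_reference above (illustrative addition): it is a
// proxy that forwards reads and writes to an underlying T, so generic code
// can treat dereferenced device pointers like plain references:
//
//   int a = 1, b = 2;
//   dpct::device_reference<int> ra(a), rb(b);
//   dpct::swap(ra, rb);  // a == 2, b == 1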
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T>
void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType *)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // sycl::buffer has no default ctor; pass a zero range to create an empty
  // buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T>
class device_iterator;
template <typename ValueType, typename Derived>
class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T>
class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
template <typename T>
void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T>
device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer>
Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
template <typename T>
const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T>
T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T>
const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T>
T &get_raw_reference(T &ref) {
return ref;
}
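/// Example (a minimal illustrative sketch; the queue, the element count, and
/// the fill value are assumptions, and q.fill assumes a USM configuration):
/// allocate device memory through the Thrust-style helpers above, obtain a
/// raw pointer for kernel use, then release the allocation.
/// \code
/// void device_pointer_demo(sycl::queue &q) {
///   constexpr std::size_t n = 16;
///   // malloc_device<T> takes an element count; it allocates n * sizeof(T).
///   dpct::device_pointer<int> p = dpct::malloc_device<int>(n);
///   int *raw = dpct::get_raw_pointer(p); // raw device pointer
///   q.fill(raw, 42, n).wait();           // initialize on the device
///   dpct::device_delete(p, n); // no-op for trivially destructible int
///   dpct::free_device(p);
/// }
/// \endcode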
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <algorithm>
#include <iterator>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include <vector>
#include "../device.hpp"
#include "memory.h"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA>
operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()),
_size(other.size()),
_capacity(other.capacity()),
_storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()),
_storage(v.real_begin()),
_size(v.size()),
_capacity(v.capacity()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void assign(
InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0) --_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
    // overwrite (erase) the subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp,
tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void insert(
iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp,
tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
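/// Example (a minimal illustrative sketch; the host data and the use of
/// oneDPL's sort are assumptions): round-trip a std::vector through
/// device_vector and run a oneDPL algorithm on the device iterators.
/// \code
/// void device_vector_demo() {
///   std::vector<int> host{3, 1, 2};
///   dpct::device_vector<int> d(host.begin(), host.end()); // host-to-device
///   oneapi::dpl::sort(
///       oneapi::dpl::execution::make_device_policy(dpct::get_default_queue()),
///       d.begin(), d.end()); // sort on the device
///   std::vector<int> back = d; // device-to-host via the conversion operator
/// }
/// \endcode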
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA>
operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void assign(
InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0) --_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
    // overwrite (erase) the subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void insert(
iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <stdexcept>
#include <sycl/sycl.hpp>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args>
constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Per-work-item array holding the input data for the scan.
/// \param outputs Per-work-item array where the scan results are stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void exclusive_scan(const Item &item,
T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
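/// Example (a minimal illustrative sketch; the group size, the four values
/// per work-item, and the device-accessible pointer `data` are assumptions):
/// each work-item owns four consecutive values and the group computes their
/// exclusive prefix sum in blocked order.
/// \code
/// void exclusive_scan_demo(sycl::queue &q, int *data) { // 128 * 4 ints
///   q.parallel_for(sycl::nd_range<1>(128, 128), [=](sycl::nd_item<1> item) {
///      int in[4], out[4];
///      std::size_t base = item.get_local_linear_id() * 4;
///      for (int i = 0; i < 4; ++i) in[i] = data[base + i];
///      dpct::group::exclusive_scan(item, in, out, 0, sycl::plus<int>());
///      for (int i = 0; i < 4; ++i) data[base + i] = out[i];
///    }).wait();
/// }
/// \endcode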
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the first i work-items, where item is the i-th
/// work-item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T exclusive_scan(const Item &item, T input, T init,
BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group; it returns the initial value of the resulting scan over the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
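/// Example (a minimal illustrative sketch; the functor below is an
/// assumption, not part of this header): a running-prefix functor for the
/// callback overload above. It receives the group-wide aggregate and returns
/// the prefix that seeds the current tile's scan, so successive tiles
/// processed by the same group form one continuous scan.
/// \code
/// struct running_prefix {
///   int total = 0;
///   int operator()(int group_aggregate) {
///     int prefix = total; // sum of all previously scanned tiles
///     total += group_aggregate;
///     return prefix;
///   }
/// };
/// // In a kernel: running_prefix cb;
/// //              out = dpct::group::exclusive_scan(item, in,
/// //                                                sycl::plus<int>(), cb);
/// \endcode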
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0>
struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT>
struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false>
class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void rank_keys(const Item &item,
uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD],
int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void exclusive_downsweep(const Item &item,
packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U>
struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U>
struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U>
struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U>
struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T>
struct traits : base_traits<T, T> {};
template <>
struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <>
struct traits<int> : base_traits<int, uint32_t> {};
template <>
struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N>
struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements the scatter-to-blocked exchange pattern used in the radix sort
/// algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD>
class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void scatter_to_blocked(Item item,
T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements sorted
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void sort(const Item &item, T (&keys)[VALUES_PER_THREAD],
int begin_bit = 0, int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit) break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
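/// Example (a minimal illustrative sketch; the group size, the four keys per
/// work-item, and the device-accessible pointer `keys` are assumptions):
/// sort 256 * 4 int keys with a single work-group, loading and storing the
/// keys in blocked order.
/// \code
/// void radix_sort_demo(sycl::queue &q, int *keys) { // 256 * 4 ints
///   using sorter_t = dpct::group::radix_sort<int, 4>;
///   std::size_t slm_size = sorter_t::get_local_memory_size(256);
///   q.submit([&](sycl::handler &h) {
///      sycl::local_accessor<uint8_t, 1> slm(sycl::range<1>(slm_size), h);
///      h.parallel_for(sycl::nd_range<1>(256, 256), [=](sycl::nd_item<1> it) {
///        int local_keys[4];
///        std::size_t base = it.get_local_id(0) * 4;
///        for (int i = 0; i < 4; ++i) local_keys[i] = keys[base + i];
///        sorter_t(&slm[0]).sort(it, local_keys); // blocked order on return
///        for (int i = 0; i < 4; ++i) keys[base + i] = local_keys[i];
///      });
///    }).wait();
/// }
/// \endcode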
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Per-work-item array holding the input data for the
/// reduction.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T reduce(Item item, T (&inputs)[VALUES_PER_THREAD],
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
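/// Example (a minimal illustrative sketch; `item` and the values are
/// assumptions): inside a kernel, a group-wide sum of four values held by
/// each work-item.
/// \code
/// int vals[4] = {1, 2, 3, 4};
/// int group_sum = dpct::group::reduce(item, vals, sycl::plus<int>());
/// // Every work-item in the group receives the same group_sum.
/// \endcode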
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value per-work-item value to be reduced
/// \param items_to_reduce number of work-items at the start of the sub-group
/// to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>,
T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
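/// Example (a minimal illustrative sketch; `item` and `my_value` are
/// assumptions): reduce only the first five work-items of the sub-group;
/// the remaining lanes contribute the known identity (0 for plus).
/// \code
/// int partial = dpct::group::reduce_over_partial_group(
///     item, my_value, std::uint16_t(5), sycl::plus<int>());
/// \endcode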
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Per-work-item array holding the input data for the scan.
/// \param outputs Per-work-item array where the scan results are stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void inclusive_scan(const Item &item,
T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD],
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
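// Usage sketch (illustrative): scan a single per-work-item value `x` and
// also obtain the group-wide total, which is broadcast to every work-item.
//
//   int total;
//   int scanned = dpct::group::inclusive_scan(item, x, sycl::plus<int>(),
//                                             total);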
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group; it returns the initial value (prefix) for the resulting scan of
/// the work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args>
constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
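// Usage sketch (illustrative, host side): sum each segment of a device
// array. `q`, `d_in`, `d_out`, `d_begin`, and `d_end` are assumed names for
// a sycl::queue and USM device allocations; 256 is one possible GROUP_SIZE.
//
//   dpct::device::segmented_reduce<256>(q, d_in, d_out, num_segments,
//                                       d_begin, d_end, sycl::plus<int>(),
//                                       0 /*init*/);
//   q.wait();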
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts>
struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp>
struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp>
struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this experimental
/// feature supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
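// Usage sketch (illustrative): because BinaryOperation need not be one of
// the SYCL native function objects here, a custom functor such as the
// hypothetical `absolute_max` below is dispatched to the scratchpad-based
// joint_reduce path above; `q` and the pointers are assumed as in the
// non-experimental overload.
//
//   struct absolute_max {
//     int operator()(int a, int b) const {
//       return sycl::abs(a) > sycl::abs(b) ? a : b;
//     }
//   };
//   dpct::device::experimental::segmented_reduce<128>(
//       q, d_in, d_out, num_segments, d_begin, d_end, absolute_max{}, 0);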
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T>
struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T>
struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp>
class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp>
struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with a raw memory buffer,
// not an initialized array, because initialization/destruction
// would cost at least O(N) time up front.
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp>
class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
  // Return an iterator to the beginning of the buffer.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp>
class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName>
struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less>
struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda:
template <typename Predicate>
struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1>
result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate>
struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1>
result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T>
struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T>
result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
// Lambda: [binary_pred](Ref a, Ref b) { return binary_pred(get<0>(a), get<0>(b)); }
template <typename Predicate>
struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T>
result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) { return pred(s) ? new_value : a; }
template <typename T, typename Predicate>
struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2>
T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
// Lambda: [pred, op](Ref a) { return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<0>(t))) get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
// Lambda: [pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op)
: pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<1>(t))) get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t))) get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// This following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to fit
// into the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the desired bit range will fit
// in 32 bits does not hold for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N>
struct uint_byte_map {};
template <>
struct uint_byte_map<1> {
using type = uint8_t;
};
template <>
struct uint_byte_map<2> {
using type = uint16_t;
};
template <>
struct uint_byte_map<4> {
using type = uint32_t;
};
template <>
struct uint_byte_map<8> {
using type = uint64_t;
};
template <typename T>
struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT>
class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
      // normal case: key is neither -0.0f nor 0.0f (the two compare equal)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
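// Usage sketch (illustrative): translate 32-bit float keys into unsigned
// integers whose ascending order is intended to match the floats' numeric
// order; note that -0.0f and 0.0f map to the same key, preserving the
// stability of a radix sort over them.
//
//   dpct::internal::translate_key<float, uint32_t> tk(0, 32);
//   uint32_t k_neg0 = tk(-0.0f), k_pos0 = tk(0.0f);  // k_neg0 == k_pos0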
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember that the frequency has been queried
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the current diff_time
//! summation variable. Also increment the number of times this clock has
//! been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the current diff_time
//! summation variable. Also increment the number of times this clock has
//! been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer that receives the newly created timer
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface pointer to the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface pointer to the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface pointer to the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface pointer to the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface pointer to the timer to query
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface pointer to the timer to query.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
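// Typical usage sketch (illustrative, not part of the original header):
//
//   StopWatchInterface *timer = NULL;
//   sdkCreateTimer(&timer);
//   sdkStartTimer(&timer);
//   /* ... work to be measured ... */
//   sdkStopTimer(&timer);
//   float msec = sdkGetTimerValue(&timer);  // total elapsed time in msec
//   sdkDeleteTimer(&timer);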
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
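// Usage sketch (illustrative): leading '-' characters and anything after an
// optional '=' are ignored when matching, so "-help" and "--help" both
// match the flag below.
//
//   if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
//     /* print usage and exit */
//   }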
// Template function to parse a numeric command-line argument of arbitrary type
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
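// Usage sketch (illustrative): given a hypothetical invocation
// "./app -n=512", the call below returns 512; if the flag is absent, the
// function returns 0.
//
//   int n = getCmdLineArgumentInt(argc, (const char **)argv, "n");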
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up
// 4
// in
// tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3
// in
// tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in
// tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in
// tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in
// tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in
// tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up
// 2
// in
// tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in
// tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4
// in
// tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in
// tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4
// in
// tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in
// tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in
// tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in
// tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
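// Usage sketch (illustrative): the returned path is heap-allocated with
// malloc(), so the caller is expected to free() it. "ref_data.dat" is a
// hypothetical file name.
//
//   char *path = sdkFindFilePath("ref_data.dat", argv[0]);
//   if (path != NULL) {
//     /* ... open the file at path ... */
//     free(path);
//   }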
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace helper_image_internal (internal helpers)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte (identity)
template <>
struct ConverterFromUByte<unsigned char> {
  //! Conversion operator
  //! @return converted value
  //! @param val value to convert
  unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from type T to unsigned char / unsigned byte
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char / unsigned byte (identity)
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a passthrough)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from float to unsigned char / unsigned byte
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
  // the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
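// Usage sketch (illustrative): load a hypothetical "input.pgm"; when the
// data pointer is NULL the function allocates the buffer, which the caller
// later frees.
//
//   unsigned char *img = NULL;
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM<unsigned char>("input.pgm", &img, &w, &h)) {
//     /* ... use img ... */
//     free(img);
//   }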
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
    unsigned char *ptr = reinterpret_cast<unsigned char *>(*data);
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
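// Usage sketch (illustrative): for T = float the values are assumed to lie
// in [0, 1] and are rescaled to bytes by ConverterToUByte<float> before
// being written. "result.pgm" is a hypothetical output name.
//
//   sdkSavePGM<float>("result.pgm", data, w, h);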
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
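// Usage sketch (illustrative): note that the element parsing above uses
// fscanf with "%f", so in practice T is expected to be float. "gold.dat"
// is a hypothetical reference file.
//
//   float *ref = NULL;
//   unsigned int len = 0;
//   if (sdkReadFile("gold.dat", &ref, &len, false)) {
//     /* ... compare against ref ... */
//     free(ref);
//   }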
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
  // allocate storage for the requested block
  data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the destination file
//! @param data data to write
//! @param len number of data elements to write
//! @param epsilon epsilon value written as a file header for later comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
  if (threshold == 0.0f) {
    return result;
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return (len * threshold > error_count);
  }
}
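// Usage sketch for compareData() (hypothetical buffers): accept the result
// when at most 5% of the elements differ from the reference by more than
// 1e-3; a threshold of 0.0f requires every element to fall within epsilon:
//
//   bool ok = compareData(reference, result, n, 1e-3f, 0.05f);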
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold % of elements allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
  if (threshold == 0.0f) {
    if (error_count) {
      printf("total # of errors = %d\n", error_count);
    }
    return (error_count == 0);
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return (len * threshold > error_count);
  }
}
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
    printf("   FAILURE: %u errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
    printf("   FAILURE: %u errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
  if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
    std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
    return false;
  }
  float normRef = sqrtf(ref);
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
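// sdkCompareL2fe() above evaluates the relative L2 error
//
//   error = ||reference - data||_2 / ||reference||_2
//
// and reports success when this error is strictly below epsilon.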
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
  if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
                << "\n";
    }
    return false;
  }
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
                << "," << src_height << ")vs(" << ref_width << ","
                << ref_height << ")\n";
    }
    return false;
  }
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
  if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
                << "\n";
    }
    return false;
  }
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
                << "," << src_height << ")vs(" << ref_width << ","
                << ref_height << ")\n";
    }
    return false;
  }
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
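// Usage sketch for the macros above (hypothetical condition):
//
//   if (!inputFileExists) {
//     RUNTIME_EXCEPTION("input file does not exist");
//   }
//
// The thrown object derives from the corresponding std exception type, so it
// can be caught as std::exception or routed through handleException().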
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavyweight, but exceptions are not meant for
  // performance-critical / release builds
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <helper_string.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dpct/dpct.hpp>
#include <sycl/sycl.hpp>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files;
// please refer to the CUDA examples for the needed CUDA headers, which may
// change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:4: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CUFFT_SUCCESS";
case 1:
return "CUFFT_INVALID_PLAN";
case 2:
return "CUFFT_ALLOC_FAILED";
case 3:
return "CUFFT_INVALID_TYPE";
case 4:
return "CUFFT_INVALID_VALUE";
case 5:
return "CUFFT_INTERNAL_ERROR";
case 6:
return "CUFFT_EXEC_FAILED";
case 7:
return "CUFFT_SETUP_FAILED";
case 8:
return "CUFFT_INVALID_SIZE";
case 9:
return "CUFFT_UNALIGNED_DATA";
case 10:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case 11:
return "CUFFT_INVALID_DEVICE";
case 12:
return "CUFFT_PARSE_ERROR";
case 13:
return "CUFFT_NO_WORKSPACE";
case 14:
return "CUFFT_NOT_IMPLEMENTED";
case 15:
return "CUFFT_LICENSE_ERROR";
case 16:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {}
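// Note: after the DPCT migration the check() body above is intentionally
// empty, so the checkCudaErrors() macro below only evaluates its argument
// for side effects; failures surface as SYCL exceptions rather than as
// returned error codes.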
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:5: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:7: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
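// Example: ftoi() rounds half away from zero, so ftoi(2.5f) == 3 and
// ftoi(-2.5f) == -3, whereas a plain cast would truncate both toward zero.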
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct dpct_type_554348 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128},
{0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128},
{0x70, 64}, {0x72, 64}, {0x75, 64}, {0x80, 64}, {0x86, 128},
{0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the value, default to the last known entry
  // so the sample can still run
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
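// Example: an SM 8.6 (Ampere) device has major=8, minor=6, so the lookup key
// is (8 << 4) + 6 = 0x86, which the table above maps to 128 cores per SM.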
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_876740 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"}, {0x32, "Kepler"}, {0x35, "Kepler"},
{0x37, "Kepler"}, {0x50, "Maxwell"}, {0x52, "Maxwell"},
{0x53, "Maxwell"}, {0x60, "Pascal"}, {0x61, "Pascal"},
{0x62, "Pascal"}, {0x70, "Volta"}, {0x72, "Xavier"},
{0x75, "Turing"}, {0x80, "Ampere"}, {0x86, "Ampere"},
{0x87, "Ampere"}, {0x89, "Ada"}, {0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the value, default to the last known entry
  // so the sample can still run
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:11: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID,
_ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:12: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf =
(uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
} catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:14: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
_ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:15: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:16: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:17: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major,
minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version && minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Samples/4_CUDA_Libraries/oceanFFT/oceanFFT.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
FFT-based Ocean simulation
based on original code by Yury Uralsky and Calvin Lin
This sample demonstrates how to use CUFFT to synthesize and
render an ocean surface in real-time.
See Jerry Tessendorf's Siggraph course notes for more details:
http://tessendorf.org/reports.html
It also serves as an example of how to generate multiple vertex
buffer streams from CUDA and render them using GLSL shaders.
*/
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#define WINDOWS_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#endif
// includes
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dpct/dpct.hpp>
#include <dpct/fft_utils.hpp>
#include <sycl/sycl.hpp>
// #include <helper_gl.h>
// #include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// #if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
// #include <GLUT/glut.h>
// #else
// #include <GL/freeglut.h>
// #endif
// #include <rendercheck_gl.h>
const char *sSDKsample = "CUDA FFT Ocean Simulation";
#define MAX_EPSILON 0.10f
#define THRESHOLD 0.15f
#define REFRESH_DELAY 10 // ms
////////////////////////////////////////////////////////////////////////////////
// constants
unsigned int windowW = 512, windowH = 512;
const unsigned int meshSize = 256;
const unsigned int spectrumW = meshSize + 4;
const unsigned int spectrumH = meshSize + 1;
const int frameCompare = 4;
// OpenGL vertex buffers
// GLuint posVertexBuffer;
// GLuint heightVertexBuffer, slopeVertexBuffer;
// struct cudaGraphicsResource *cuda_posVB_resource, *cuda_heightVB_resource,
// *cuda_slopeVB_resource; // handles OpenGL-CUDA exchange
// GLuint indexBuffer;
// GLuint shaderProg;
/*char *vertShaderPath = 0, *fragShaderPath = 0;
// mouse controls
int mouseOldX, mouseOldY;
int mouseButtons = 0;
float rotateX = 20.0f, rotateY = 0.0f;
float translateX = 0.0f, translateY = 0.0f, translateZ = -2.0f;*/
bool animate = true;
/*bool drawPoints = false;
bool wireFrame = false;
bool g_hasDouble = false;*/
// FFT data
dpct::fft::fft_engine_ptr fftPlan;
sycl::float2 *d_h0 = 0; // heightfield at time 0
sycl::float2 *h_h0 = 0;
sycl::float2 *d_ht = 0; // heightfield at time t
sycl::float2 *d_slope = 0;
// pointers to device object
float *g_hptr = NULL;
sycl::float2 *g_sptr = NULL;
// simulation parameters
const float g = 9.81f; // gravitational constant
const float A = 1e-7f; // wave scale factor
const float patchSize = 100; // patch size
float windSpeed = 100.0f;
float windDir = 3.141592654F / 3.0f;
float dirDepend = 0.07f;
StopWatchInterface *timer = NULL;
float animTime = 0.0f;
float prevTime = 0.0f;
float animationRate = -0.001f;
// Auto-Verification Code
const int frameCheckNumber = 4;
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
////////////////////////////////////////////////////////////////////////////////
// kernels
// #include <oceanFFT_kernel.cu>
extern "C" void cudaGenerateSpectrumKernel(sycl::float2 *d_h0,
sycl::float2 *d_ht,
unsigned int in_width,
unsigned int out_width,
unsigned int out_height,
float animTime, float patchSize);
extern "C" void cudaUpdateHeightmapKernel(float *d_heightMap,
sycl::float2 *d_ht,
unsigned int width,
unsigned int height, bool autoTest);
extern "C" void cudaCalculateSlopeKernel(float *h, sycl::float2 *slopeOut,
unsigned int width,
unsigned int height);
////////////////////////////////////////////////////////////////////////////////
// forward declarations
void runAutoTest(int argc, char **argv);
// void runGraphicsTest(int argc, char **argv);
// GL functionality
// bool initGL(int *argc, char **argv);
// void createVBO(GLuint *vbo, int size);
// void deleteVBO(GLuint *vbo);
// void createMeshIndexBuffer(GLuint *id, int w, int h);
// void createMeshPositionVBO(GLuint *id, int w, int h);
// GLuint loadGLSLProgram(const char *vertFileName, const char *fragFileName);
// rendering callbacks
// void display();
// void keyboard(unsigned char key, int x, int y);
// void mouse(int button, int state, int x, int y);
// void motion(int x, int y);
// void reshape(int w, int h);
void timerEvent(int value);
// Cuda functionality
// void runCuda();
void runCudaTest(char *exec_path);
void generate_h0(sycl::float2 *h0);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
printf(
"NOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n\n");
// check for command line arguments
if (checkCmdLineFlag(argc, (const char **)argv, "qatest")) {
animate = false;
fpsLimit = frameCheckNumber;
runAutoTest(argc, argv);
} /* else {
printf(
"[%s]\n\n"
"Left mouse button - rotate\n"
"Middle mouse button - pan\n"
"Right mouse button - zoom\n"
"'w' key - toggle wireframe\n",
sSDKsample);
runGraphicsTest(argc, argv);
}*/
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
void runAutoTest(int argc, char **argv) {
printf("%s Starting...\n\n", argv[0]);
// Cuda init
int dev = findCudaDevice(argc, (const char **)argv);
dpct::device_info deviceProp;
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::dev_mgr::instance().get_device(dev).get_device_info(deviceProp)));
/*
DPCT1005:18: The SYCL device version is different from CUDA Compute
Compatibility. You may need to rewrite this code.
*/
printf("Compute capability %d.%d\n", deviceProp.get_major_version(),
deviceProp.get_minor_version());
// create FFT plan
checkCudaErrors(DPCT_CHECK_ERROR(
fftPlan = dpct::fft::fft_engine::create(
&dpct::get_default_queue(), meshSize, meshSize,
dpct::fft::fft_type::complex_float_to_complex_float)));
// allocate memory
int spectrumSize = spectrumW * spectrumH * sizeof(sycl::float2);
checkCudaErrors(
DPCT_CHECK_ERROR(d_h0 = (sycl::float2 *)sycl::malloc_device(
spectrumSize, dpct::get_default_queue())));
h_h0 = (sycl::float2 *)malloc(spectrumSize);
generate_h0(h_h0);
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memcpy(d_h0, h_h0, spectrumSize).wait()));
int outputSize = meshSize * meshSize * sizeof(sycl::float2);
checkCudaErrors(DPCT_CHECK_ERROR(d_ht = (sycl::float2 *)sycl::malloc_device(
outputSize, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(d_slope = (sycl::float2 *)sycl::malloc_device(
outputSize, dpct::get_default_queue())));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
prevTime = sdkGetTimerValue(&timer);
runCudaTest(argv[0]);
printf("Processing time : %f (ms)\n", sdkGetTimerValue(&timer));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_ht, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_slope, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_h0, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(dpct::fft::fft_engine::destroy(fftPlan)));
free(h_h0);
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
/*void runGraphicsTest(int argc, char **argv) {
#if defined(__linux__)
setenv("DISPLAY", ":0", 0);
#endif
printf("[%s] ", sSDKsample);
printf("\n");
if (checkCmdLineFlag(argc, (const char **)argv, "device")) {
printf("[%s]\n", argv[0]);
printf(" Does not explicitly support -device=n in OpenGL mode\n");
printf(" To use -device=n, the sample must be running w/o OpenGL\n\n");
printf(" > %s -device=n -qatest\n", argv[0]);
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA
// interop.
if (false == initGL(&argc, argv)) {
return;
}
findCudaDevice(argc, (const char **)argv);
// create FFT plan
checkCudaErrors(cufftPlan2d(&fftPlan, meshSize, meshSize, CUFFT_C2C));
// allocate memory
int spectrumSize = spectrumW * spectrumH * sizeof(float2);
checkCudaErrors(cudaMalloc((void **)&d_h0, spectrumSize));
h_h0 = (float2 *)malloc(spectrumSize);
generate_h0(h_h0);
checkCudaErrors(cudaMemcpy(d_h0, h_h0, spectrumSize, cudaMemcpyHostToDevice));
int outputSize = meshSize * meshSize * sizeof(float2);
checkCudaErrors(cudaMalloc((void **)&d_ht, outputSize));
checkCudaErrors(cudaMalloc((void **)&d_slope, outputSize));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
prevTime = sdkGetTimerValue(&timer);
// create vertex buffers and register with CUDA
createVBO(&heightVertexBuffer, meshSize * meshSize * sizeof(float));
checkCudaErrors(
cudaGraphicsGLRegisterBuffer(&cuda_heightVB_resource, heightVertexBuffer,
cudaGraphicsMapFlagsWriteDiscard));
createVBO(&slopeVertexBuffer, outputSize);
checkCudaErrors(
cudaGraphicsGLRegisterBuffer(&cuda_slopeVB_resource, slopeVertexBuffer,
cudaGraphicsMapFlagsWriteDiscard));
// create vertex and index buffer for mesh
createMeshPositionVBO(&posVertexBuffer, meshSize, meshSize);
createMeshIndexBuffer(&indexBuffer, meshSize, meshSize);
runCuda();
// register callbacks
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutReshapeFunc(reshape);
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
// start rendering mainloop
glutMainLoop();
}*/
float urand() { return rand() / (float)RAND_MAX; }
// Generates Gaussian random number with mean 0 and standard deviation 1.
float gauss() {
float u1 = urand();
float u2 = urand();
if (u1 < 1e-6f) {
u1 = 1e-6f;
}
return sqrtf(-2 * logf(u1)) * cosf(2 * 3.141592654F * u2);
}
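// Editor's note: gauss() above is the Box-Muller transform. As a worked
// example (hypothetical draws), u1 = 0.5f and u2 = 0.25f give
//   sqrtf(-2 * logf(0.5f)) * cosf(2 * 3.141592654F * 0.25f),
// i.e. cosf(pi/2), a sample at the mean (up to float rounding); the clamp on
// u1 avoids logf(0).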
// Phillips spectrum
// (Kx, Ky) - normalized wave vector
// Vdir - wind angle in radians
// V - wind speed
// A - constant
float phillips(float Kx, float Ky, float Vdir, float V, float A,
float dir_depend) {
float k_squared = Kx * Kx + Ky * Ky;
if (k_squared == 0.0f) {
return 0.0f;
}
// largest possible wave from constant wind of velocity v
float L = V * V / g;
float k_x = Kx / sqrtf(k_squared);
float k_y = Ky / sqrtf(k_squared);
float w_dot_k = k_x * cosf(Vdir) + k_y * sinf(Vdir);
float phillips = A * expf(-1.0f / (k_squared * L * L)) /
(k_squared * k_squared) * w_dot_k * w_dot_k;
// filter out waves moving opposite to wind
if (w_dot_k < 0.0f) {
phillips *= dir_depend;
}
// damp out waves with very small length w << l
// float w = L / 10000;
// phillips *= expf(-k_squared * w * w);
return phillips;
}
// Generate base heightfield in frequency space
void generate_h0(sycl::float2 *h0) {
for (unsigned int y = 0; y <= meshSize; y++) {
for (unsigned int x = 0; x <= meshSize; x++) {
float kx =
(-(int)meshSize / 2.0f + x) * (2.0f * 3.141592654F / patchSize);
float ky =
(-(int)meshSize / 2.0f + y) * (2.0f * 3.141592654F / patchSize);
float P = sqrtf(phillips(kx, ky, windDir, windSpeed, A, dirDepend));
if (kx == 0.0f && ky == 0.0f) {
P = 0.0f;
}
// float Er = urand()*2.0f-1.0f;
// float Ei = urand()*2.0f-1.0f;
float Er = gauss();
float Ei = gauss();
float h0_re = Er * P * CUDART_SQRT_HALF_F;
float h0_im = Ei * P * CUDART_SQRT_HALF_F;
int i = y * spectrumW + x;
h0[i].x() = h0_re;
h0[i].y() = h0_im;
}
}
}
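// Editor's note: with CUDART_SQRT_HALF_F == 1/sqrt(2), the loop above builds
// the standard Tessendorf initial spectrum
//   h0(k) = (Er + i*Ei) * sqrt(phillips(k) / 2),
// where Er and Ei are independent standard Gaussian draws from gauss().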
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda kernels
////////////////////////////////////////////////////////////////////////////////
/*void runCuda() {
size_t num_bytes;
// generate wave spectrum in frequency domain
cudaGenerateSpectrumKernel(d_h0, d_ht, spectrumW, meshSize, meshSize,
animTime, patchSize);
// execute inverse FFT to convert to spatial domain
checkCudaErrors(cufftExecC2C(fftPlan, d_ht, d_ht, CUFFT_INVERSE));
// update heightmap values in vertex buffer
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_heightVB_resource, 0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer(
(void **)&g_hptr, &num_bytes, cuda_heightVB_resource));
cudaUpdateHeightmapKernel(g_hptr, d_ht, meshSize, meshSize, false);
// calculate slope for shading
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_slopeVB_resource, 0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer(
(void **)&g_sptr, &num_bytes, cuda_slopeVB_resource));
cudaCalculateSlopeKernel(g_hptr, g_sptr, meshSize, meshSize);
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_heightVB_resource, 0));
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_slopeVB_resource, 0));
}*/
void runCudaTest(char *exec_path) {
checkCudaErrors(
DPCT_CHECK_ERROR(g_hptr = sycl::malloc_device<float>(
meshSize * meshSize, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(g_sptr = sycl::malloc_device<sycl::float2>(
meshSize * meshSize, dpct::get_default_queue())));
// generate wave spectrum in frequency domain
cudaGenerateSpectrumKernel(d_h0, d_ht, spectrumW, meshSize, meshSize,
animTime, patchSize);
// execute inverse FFT to convert to spatial domain
checkCudaErrors(
DPCT_CHECK_ERROR((fftPlan->compute<sycl::float2, sycl::float2>(
d_ht, d_ht, dpct::fft::fft_direction::backward))));
// update heightmap values
cudaUpdateHeightmapKernel(g_hptr, d_ht, meshSize, meshSize, true);
{
float *hptr = (float *)malloc(meshSize * meshSize * sizeof(float));
dpct::get_default_queue()
.memcpy((void *)hptr, (void *)g_hptr,
meshSize * meshSize * sizeof(float))
.wait();
sdkDumpBin((void *)hptr, meshSize * meshSize * sizeof(float),
"spatialDomain.bin");
if (!sdkCompareBin2BinFloat("spatialDomain.bin", "ref_spatialDomain.bin",
meshSize * meshSize, MAX_EPSILON, THRESHOLD,
exec_path)) {
g_TotalErrors++;
}
free(hptr);
}
// calculate slope for shading
cudaCalculateSlopeKernel(g_hptr, g_sptr, meshSize, meshSize);
{
sycl::float2 *sptr =
(sycl::float2 *)malloc(meshSize * meshSize * sizeof(sycl::float2));
dpct::get_default_queue()
.memcpy((void *)sptr, (void *)g_sptr,
meshSize * meshSize * sizeof(sycl::float2))
.wait();
sdkDumpBin(sptr, meshSize * meshSize * sizeof(sycl::float2),
"slopeShading.bin");
if (!sdkCompareBin2BinFloat("slopeShading.bin", "ref_slopeShading.bin",
meshSize * meshSize * 2, MAX_EPSILON, THRESHOLD,
exec_path)) {
g_TotalErrors++;
}
free(sptr);
}
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(g_hptr, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(g_sptr, dpct::get_default_queue())));
}
// void computeFPS()
//{
// frameCount++;
// fpsCount++;
//
// if (fpsCount == fpsLimit) {
// fpsCount = 0;
// }
//}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
/*void display() {
// run CUDA kernel to generate vertex positions
if (animate) {
runCuda();
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(translateX, translateY, translateZ);
glRotatef(rotateX, 1.0, 0.0, 0.0);
glRotatef(rotateY, 0.0, 1.0, 0.0);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, posVertexBuffer);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, heightVertexBuffer);
glClientActiveTexture(GL_TEXTURE0);
glTexCoordPointer(1, GL_FLOAT, 0, 0);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, slopeVertexBuffer);
glClientActiveTexture(GL_TEXTURE1);
glTexCoordPointer(2, GL_FLOAT, 0, 0);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glUseProgram(shaderProg);
// Set default uniform variables parameters for the vertex shader
GLuint uniHeightScale, uniChopiness, uniSize;
uniHeightScale = glGetUniformLocation(shaderProg, "heightScale");
glUniform1f(uniHeightScale, 0.5f);
uniChopiness = glGetUniformLocation(shaderProg, "chopiness");
glUniform1f(uniChopiness, 1.0f);
uniSize = glGetUniformLocation(shaderProg, "size");
glUniform2f(uniSize, (float)meshSize, (float)meshSize);
// Set default uniform variables parameters for the pixel shader
GLuint uniDeepColor, uniShallowColor, uniSkyColor, uniLightDir;
uniDeepColor = glGetUniformLocation(shaderProg, "deepColor");
glUniform4f(uniDeepColor, 0.0f, 0.1f, 0.4f, 1.0f);
uniShallowColor = glGetUniformLocation(shaderProg, "shallowColor");
glUniform4f(uniShallowColor, 0.1f, 0.3f, 0.3f, 1.0f);
uniSkyColor = glGetUniformLocation(shaderProg, "skyColor");
glUniform4f(uniSkyColor, 1.0f, 1.0f, 1.0f, 1.0f);
uniLightDir = glGetUniformLocation(shaderProg, "lightDir");
glUniform3f(uniLightDir, 0.0f, 1.0f, 0.0f);
// end of uniform settings
glColor3f(1.0, 1.0, 1.0);
if (drawPoints) {
glDrawArrays(GL_POINTS, 0, meshSize * meshSize);
} else {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glPolygonMode(GL_FRONT_AND_BACK, wireFrame ? GL_LINE : GL_FILL);
glDrawElements(GL_TRIANGLE_STRIP, ((meshSize * 2) + 2) * (meshSize - 1),
GL_UNSIGNED_INT, 0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
glDisableClientState(GL_VERTEX_ARRAY);
glClientActiveTexture(GL_TEXTURE0);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTexture(GL_TEXTURE1);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glUseProgram(0);
glutSwapBuffers();
// computeFPS();
}
void timerEvent(int value) {
float time = sdkGetTimerValue(&timer);
if (animate) {
animTime += (time - prevTime) * animationRate;
}
glutPostRedisplay();
prevTime = time;
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
}
void cleanup() {
sdkDeleteTimer(&timer);
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_heightVB_resource));
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_slopeVB_resource));
deleteVBO(&posVertexBuffer);
deleteVBO(&heightVertexBuffer);
deleteVBO(&slopeVertexBuffer);
checkCudaErrors(cudaFree(d_h0));
checkCudaErrors(cudaFree(d_slope));
checkCudaErrors(cudaFree(d_ht));
free(h_h0);
cufftDestroy(fftPlan);
}*/
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
// void keyboard(unsigned char key, int /*x*/, int /*y*/) {
/* switch (key) {
case (27):
cleanup();
exit(EXIT_SUCCESS);
case 'w':
wireFrame = !wireFrame;
break;
case 'p':
drawPoints = !drawPoints;
break;
case ' ':
animate = !animate;
break;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y) {
if (state == GLUT_DOWN) {
mouseButtons |= 1 << button;
} else if (state == GLUT_UP) {
mouseButtons = 0;
}
mouseOldX = x;
mouseOldY = y;
glutPostRedisplay();
}
void motion(int x, int y) {
float dx, dy;
dx = (float)(x - mouseOldX);
dy = (float)(y - mouseOldY);
if (mouseButtons == 1) {
rotateX += dy * 0.2f;
rotateY += dx * 0.2f;
} else if (mouseButtons == 2) {
translateX += dx * 0.01f;
translateY -= dy * 0.01f;
} else if (mouseButtons == 4) {
translateZ += dy * 0.01f;
}
mouseOldX = x;
mouseOldY = y;
}
void reshape(int w, int h) {
glViewport(0, 0, w, h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (double)w / (double)h, 0.1, 10.0);
windowW = w;
windowH = h;
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
bool initGL(int *argc, char **argv) {
// Create GL context
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(windowW, windowH);
glutCreateWindow("CUDA FFT Ocean Simulation");
vertShaderPath = sdkFindFilePath("ocean.vert", argv[0]);
fragShaderPath = sdkFindFilePath("ocean.frag", argv[0]);
if (vertShaderPath == NULL || fragShaderPath == NULL) {
fprintf(stderr, "Error unable to find GLSL vertex and fragment shaders!\n");
exit(EXIT_FAILURE);
}
// initialize necessary OpenGL extensions
if (!isGLVersionSupported(2, 0)) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return false;
}
if (!areGLExtensionsSupported(
"GL_ARB_vertex_buffer_object GL_ARB_pixel_buffer_object")) {
fprintf(stderr, "Error: failed to get minimal extensions for demo\n");
fprintf(stderr, "This sample requires:\n");
fprintf(stderr, " OpenGL version 1.5\n");
fprintf(stderr, " GL_ARB_vertex_buffer_object\n");
fprintf(stderr, " GL_ARB_pixel_buffer_object\n");
cleanup();
exit(EXIT_FAILURE);
}
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glEnable(GL_DEPTH_TEST);
// load shader
shaderProg = loadGLSLProgram(vertShaderPath, fragShaderPath);
SDK_CHECK_ERROR_GL();
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void createVBO(GLuint *vbo, int size) {
// create buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
SDK_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
void deleteVBO(GLuint *vbo) {
glDeleteBuffers(1, vbo);
*vbo = 0;
}
// create index buffer for rendering quad mesh
void createMeshIndexBuffer(GLuint *id, int w, int h) {
int size = ((w * 2) + 2) * (h - 1) * sizeof(GLuint);
// create index buffer
glGenBuffers(1, id);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, *id);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, 0, GL_STATIC_DRAW);
// fill with indices for rendering mesh as triangle strips
GLuint *indices =
(GLuint *)glMapBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_WRITE_ONLY);
if (!indices) {
return;
}
for (int y = 0; y < h - 1; y++) {
for (int x = 0; x < w; x++) {
*indices++ = y * w + x;
*indices++ = (y + 1) * w + x;
}
// start new strip with degenerate triangle
*indices++ = (y + 1) * w + (w - 1);
*indices++ = (y + 1) * w;
}
glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
// create fixed vertex buffer to store mesh vertices
void createMeshPositionVBO(GLuint *id, int w, int h) {
createVBO(id, w * h * 4 * sizeof(float));
glBindBuffer(GL_ARRAY_BUFFER, *id);
float *pos = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
if (!pos) {
return;
}
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
float u = x / (float)(w - 1);
float v = y / (float)(h - 1);
*pos++ = u * 2.0f - 1.0f;
*pos++ = 0.0f;
*pos++ = v * 2.0f - 1.0f;
*pos++ = 1.0f;
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
// Attach shader to a program
int attachShader(GLuint prg, GLenum type, const char *name) {
GLuint shader;
FILE *fp;
int size, compiled;
char *src;
fp = fopen(name, "rb");
if (!fp) {
return 0;
}
fseek(fp, 0, SEEK_END);
size = ftell(fp);
src = (char *)malloc(size);
fseek(fp, 0, SEEK_SET);
fread(src, sizeof(char), size, fp);
fclose(fp);
shader = glCreateShader(type);
glShaderSource(shader, 1, (const char **)&src, (const GLint *)&size);
glCompileShader(shader);
glGetShaderiv(shader, GL_COMPILE_STATUS, (GLint *)&compiled);
if (!compiled) {
char log[2048];
int len;
glGetShaderInfoLog(shader, 2048, (GLsizei *)&len, log);
printf("Info log: %s\n", log);
glDeleteShader(shader);
return 0;
}
free(src);
glAttachShader(prg, shader);
glDeleteShader(shader);
return 1;
}
// Create shader program from vertex shader and fragment shader files
GLuint loadGLSLProgram(const char *vertFileName, const char *fragFileName) {
GLint linked;
GLuint program;
program = glCreateProgram();
if (!attachShader(program, GL_VERTEX_SHADER, vertFileName)) {
glDeleteProgram(program);
fprintf(stderr, "Couldn't attach vertex shader from file %s\n",
vertFileName);
return 0;
}
if (!attachShader(program, GL_FRAGMENT_SHADER, fragFileName)) {
glDeleteProgram(program);
fprintf(stderr, "Couldn't attach fragment shader from file %s\n",
fragFileName);
return 0;
}
glLinkProgram(program);
glGetProgramiv(program, GL_LINK_STATUS, &linked);
if (!linked) {
glDeleteProgram(program);
char temp[256];
glGetProgramInfoLog(program, 256, 0, temp);
fprintf(stderr, "Failed to link program: %s\n", temp);
return 0;
}
return program;
}*/
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/Samples/4_CUDA_Libraries/oceanFFT/oceanFFT_kernel.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
#include <dpct/dpct.hpp>
#include <dpct/fft_utils.hpp>
#include <sycl/sycl.hpp>
// Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b) { return (a + (b - 1)) / b; }
// complex math functions
sycl::float2 conjugate(sycl::float2 arg) {
return sycl::float2(arg.x(), -arg.y());
}
sycl::float2 complex_exp(float arg) {
return sycl::float2(sycl::cos(arg), sycl::sin(arg));
}
sycl::float2 complex_add(sycl::float2 a, sycl::float2 b) {
return sycl::float2(a.x() + b.x(), a.y() + b.y());
}
sycl::float2 complex_mult(sycl::float2 ab, sycl::float2 cd) {
return sycl::float2(ab.x() * cd.x() - ab.y() * cd.y(),
ab.x() * cd.y() + ab.y() * cd.x());
}
// generate wave heightfield at time t based on initial heightfield and
// dispersion relationship
void generateSpectrumKernel(sycl::float2 *h0, sycl::float2 *ht,
unsigned int in_width, unsigned int out_width,
unsigned int out_height, float t, float patchSize,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int in_index = y * in_width + x;
unsigned int in_mindex =
(out_height - y) * in_width + (out_width - x); // mirrored
unsigned int out_index = y * out_width + x;
// calculate wave vector
sycl::float2 k;
k.x() = (-(int)out_width / 2.0f + x) * (2.0f * 3.141592654F / patchSize);
k.y() = (-(int)out_width / 2.0f + y) * (2.0f * 3.141592654F / patchSize);
// calculate dispersion w(k)
float k_len = sycl::sqrt(k.x() * k.x() + k.y() * k.y());
float w = sycl::sqrt(9.81f * k_len);
if ((x < out_width) && (y < out_height)) {
sycl::float2 h0_k = h0[in_index];
sycl::float2 h0_mk = h0[in_mindex];
// output frequency-space complex values
ht[out_index] =
complex_add(complex_mult(h0_k, complex_exp(w * t)),
complex_mult(conjugate(h0_mk), complex_exp(-w * t)));
// ht[out_index] = h0_k;
}
}
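// Editor's note: w = sqrt(9.81 * |k|) above is the deep-water dispersion
// relation w(k) = sqrt(g*|k|), and the output is the time-evolved spectrum
//   ht(k, t) = h0(k) * exp(i*w*t) + conj(h0(-k)) * exp(-i*w*t),
// whose Hermitian symmetry keeps the spatial heightfield real after the
// inverse FFT.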
// update height map values based on output of FFT
void updateHeightmapKernel(float *heightMap, sycl::float2 *ht,
unsigned int width,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
// cos(pi * (m1 + m2))
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].x() * sign_correction;
}
// update height map values based on output of FFT
void updateHeightmapKernel_y(float *heightMap, sycl::float2 *ht,
unsigned int width,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
// cos(pi * (m1 + m2))
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].y() * sign_correction;
}
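// Editor's note: the ((x + y) & 0x01) sign flip in both kernels above is the
// factor (-1)^(x+y) = cos(pi*(x+y)); applying it to the inverse-FFT output is
// equivalent to an fftshift in the frequency domain, compensating for the
// spectrum being centered at (meshSize/2, meshSize/2) rather than the origin.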
// generate slope by partial differences in spatial domain
void calculateSlopeKernel(float *h, sycl::float2 *slopeOut, unsigned int width,
unsigned int height,
const sycl::nd_item<3> &item_ct1) {
unsigned int x = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
unsigned int y = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
item_ct1.get_local_id(1);
unsigned int i = y * width + x;
sycl::float2 slope = sycl::float2(0.0f, 0.0f);
if ((x > 0) && (y > 0) && (x < width - 1) && (y < height - 1)) {
slope.x() = h[i + 1] - h[i - 1];
slope.y() = h[i + width] - h[i - width];
}
slopeOut[i] = slope;
}
// wrapper functions
extern "C" void cudaGenerateSpectrumKernel(sycl::float2 *d_h0,
sycl::float2 *d_ht,
unsigned int in_width,
unsigned int out_width,
unsigned int out_height,
float animTime, float patchSize) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid(1, cuda_iDivUp(out_height, block[1]),
cuda_iDivUp(out_width, block[2]));
/*
DPCT1049:0: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
generateSpectrumKernel(d_h0, d_ht, in_width, out_width, out_height,
animTime, patchSize, item_ct1);
});
}
extern "C" void cudaUpdateHeightmapKernel(float *d_heightMap,
sycl::float2 *d_ht,
unsigned int width,
unsigned int height, bool autoTest) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid(1, cuda_iDivUp(height, block[1]),
cuda_iDivUp(width, block[2]));
if (autoTest) {
/*
DPCT1049:1: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
updateHeightmapKernel_y(d_heightMap, d_ht, width, item_ct1);
});
} else {
/*
DPCT1049:2: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid * block, block), [=](sycl::nd_item<3> item_ct1) {
updateHeightmapKernel(d_heightMap, d_ht, width, item_ct1);
});
}
}
extern "C" void cudaCalculateSlopeKernel(float *hptr, sycl::float2 *slopeOut,
unsigned int width,
unsigned int height) {
sycl::range<3> block(1, 8, 8);
sycl::range<3> grid2(1, cuda_iDivUp(height, block[1]),
cuda_iDivUp(width, block[2]));
/*
DPCT1049:3: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(grid2 * block, block), [=](sycl::nd_item<3> item_ct1) {
calculateSlopeKernel(hptr, slopeOut, width, height, item_ct1);
});
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <memory>
#include <oneapi/ccl.hpp>
#include <sycl/sycl.hpp>
#include <unordered_map>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &get_kvs(
const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(
std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Helper class to initialize the oneCCL environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get the stored kvs with \p addr if it exists. Otherwise, create a kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs> create_kvs(
const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr) ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
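/// Editor's note: a minimal usage sketch (assumed multi-rank setup; the
/// address exchange transport, e.g. an MPI broadcast, is hypothetical and
/// happens out of band):
/// \code
/// oneapi::ccl::kvs::address_type addr;
/// if (rank == 0) addr = dpct::ccl::create_kvs_address();
/// /* broadcast addr from rank 0 to all other ranks here */
/// auto kvs = dpct::ccl::create_kvs(addr);
/// \endcode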
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() { delete _ccl_stream_ptr; };
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const { return _comm.rank(); }
/// Retrieves the number of rank in oneapi::ccl::communicator
/// \returns The number of the ranks
int size() const { return _comm.size(); }
/// Return underlying native device, which was used in
/// oneapi::ccl::communicator
sycl::device get_device() const { return _comm.get_device().get_native(); }
/// \brief allreduce is a collective communication operation that performs the
/// global reduction operation
/// on values from all ranks of communicator and distributes the result
/// back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store the reduced result, must have
/// the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and
/// @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the
/// communicator and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and
/// @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of the reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts
///        data from one rank of the communicator (denoted as root) to all
///        other ranks. Only the in-place operation is supported.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result
/// \param count the number of elements of type @c dtype in @c buf
/// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
      throw std::runtime_error(
          "oneCCL broadcast only supports the in-place operation. "
          "send_buf and recv_buf must be the same.");
    }
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that
/// performs the global reduction operation
/// on values from all ranks of the communicator and scatters the
/// result in blocks back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store the reduced result, must have
/// the same dimension as @c send_buf
/// \param recv_count the number of elements of type @c dtype in the receive
/// block
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if (!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr =
new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr), _imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh) {
cgh.host_task([=] {
_imp->_ccl_event_impl.wait();
delete _imp;
});
});
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
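/// Editor's note: a minimal allreduce sketch (size, rank and the kvs address
/// id are assumed to be obtained out of band, and d_send/d_recv are
/// hypothetical device allocations of n floats):
/// \code
/// dpct::ccl::communicator_wrapper comm(size, rank, id);
/// sycl::queue q;
/// comm.allreduce(d_send, d_recv, n, oneapi::ccl::datatype::float32,
///                oneapi::ccl::reduction::sum, &q);
/// \endcode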
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <cassert>
#include <complex>
#include <cstdint>
#include <sycl/sycl.hpp>
#include <type_traits>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T,
unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT
__attribute__((noduplicate)) T
__spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T>
class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints>
struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T>
struct DataType {
using T2 = T;
};
template <typename T>
struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size,
direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size,
direction)
.wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer points to the destination location.
/// \param [in] from_ptr A pointer points to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32) return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
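/// Editor's note: the two casts above are inverses of each other; e.g. for a
/// double d, cast_ints_to_double(cast_double_to_int(d),
/// cast_double_to_int(d, false)) reproduces d bit-exactly.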
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T>
inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a) return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
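/// Editor's note: for example, reverse_bits<uint8_t>(0x01) == 0x80 and
/// reverse_bits<uint32_t>(1u) == 0x80000000u; the loop is a logarithmic
/// mask-and-swap over halves, quarters, ..., down to single bits.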
/// \param [in] a The first value contains 4 bytes
/// \param [in] b The second value contains 4 bytes
/// \param [in] s The selector value, only lower 16bit used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
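/// Editor's note: each selector nibble picks one of the 8 bytes of the 64-bit
/// concatenation b:a. For example, s = 0x3210 returns a unchanged, and
/// byte_level_permute(0x11223344u, 0u, 0x0123u) == 0x44332211u (byte reversal
/// of a).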
/// Find position of first least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T>
inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
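/// Editor's note: e.g. ffs(0b1000) == 4 (ctz(8) == 3, plus one), and the
/// final modulo maps the ctz(0) == bit-width case back to ffs(0) == 0.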
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in logical sub_group gets value from another work-item
/// whose id is \p remote_local_id. If \p remote_local_id is outside the
/// logical sub_group id range, \p remote_local_id will modulo with \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
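/// Editor's note: a minimal in-kernel usage sketch (assumed sub-group size of
/// 32, matching the default logical_sub_group_size; q is a hypothetical
/// sycl::queue):
/// \code
/// q.parallel_for(sycl::nd_range<1>{64, 64}, [=](sycl::nd_item<1> it) {
///   auto sg = it.get_sub_group();
///   int v = it.get_local_linear_id();
///   int lane0 = dpct::select_from_sub_group(sg, v, 0); // value held by the
///                                                      // logical group's lane 0
/// });
/// \endcode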
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is caller's id adds \p delta. If calculated id is outside the logical
/// sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical_sub_group gets value from another work-item whose
/// id is caller's id subtracts \p delta. If calculated id is outside the
/// logical sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is bitwise exclusive OR of the caller's id and \p mask. If calculated id
/// is outside the logical sub_group id range, the work-item will get value from
/// itself. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
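/// Editor's note: XOR shuffles are the building block of butterfly
/// reductions; a sketch (assumed sub-group size of 32, sg and v as in the
/// sketch above):
/// \code
/// for (int m = 16; m > 0; m >>= 1)
///   v += dpct::permute_sub_group_by_xor(sg, v, m);
/// // every lane now holds the sub-group-wide sum of v
/// \endcode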
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit set to 1 means the work-item with id n
/// participates. All work-items named in member_mask must execute with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask, sycl::sub_group g, T x,
int remote_local_id, int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x,
logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit set to 1 means the work-item with id n
/// participates. All work-items named in member_mask must execute with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int delta, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result =
__spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit set to 1 means the work-item with id n
/// participates. All work-items named in member_mask must execute with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int delta, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked
/// sub-group operation. The parameter member_mask indicates the work-items
/// participating in the call: the n-th bit set to 1 means the work-item with
/// id n participates. All work-items named in member_mask must execute with
/// the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask, sycl::sub_group g, T x,
unsigned int mask, int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size)
? start_index + target_offset
: id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x,
logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime,
"Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
"Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its work-group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void nd_range_barrier(
const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its work-group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void nd_range_barrier(
const sycl::nd_item<1> &item,
sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
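/// Editor's note: a minimal usage sketch. The counter is assumed to be a
/// zero-initialized unsigned int in device global memory, and N/WG are
/// hypothetical range sizes small enough that all work-groups are resident on
/// the device simultaneously (see the note above):
/// \code
/// unsigned int *cnt = sycl::malloc_device<unsigned int>(1, q);
/// q.memset(cnt, 0, sizeof(unsigned int)).wait();
/// q.parallel_for(sycl::nd_range<1>{N, WG}, [=](sycl::nd_item<1> it) {
///   sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
///                    sycl::memory_scope::device,
///                    sycl::access::address_space::global_space> c(*cnt);
///   dpct::experimental::nd_range_barrier(it, c);
/// });
/// \endcode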
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
  /// Returns the number of logical-groups in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
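/// Example usage (an illustrative sketch): splitting a work-group into logical
/// tiles of 16 work-items inside a 3D kernel body, where \p item is the
/// kernel's sycl::nd_item<3>.
/// \code
/// dpct::experimental::logical_group tile(item, item.get_group(), 16);
/// uint32_t lane = tile.get_local_linear_id();     // 0..15 within the tile
/// uint32_t tile_id = tile.get_group_linear_id();  // tile index in the group
/// uint32_t tiles = tile.get_group_linear_range(); // number of tiles
/// \endcode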
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the maximum
/// number of active work-groups per Xe-Core. Refer to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether a barrier is used.
/// \param [in] used_large_grf Whether the large General Register File is used.
/// \return Returns 0 if there is no error.
/// If \p wg_size exceeds the max work-group size, the max work-group size is
/// used instead of \p wg_size and -1 is returned.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
    auto eu_count =
        dev.get_info<sycl::ext::intel::info::device::gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf) num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
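/// Example usage (an illustrative sketch): estimating occupancy for a kernel
/// launched with 256-item work-groups that use 8 KB of SLM and a barrier. The
/// values are assumptions made for this example.
/// \code
/// int num_wg = 0;
/// int status = dpct::experimental::calculate_max_active_wg_per_xecore(
///     &num_wg, /*wg_size=*/256, /*slm_size=*/8 * 1024,
///     /*sg_size=*/32, /*used_barrier=*/true);
/// // status == 0 on success; num_wg holds the active work-group count.
/// \endcode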
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ? &get_default_queue() : reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2 *,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params, typename R,
typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type =
std::tuple_element_t<account_for_default_params<i>(), std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i - 1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i - 1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra) return nullptr;
for (; (std::size_t)*extra != 0; ++extra) {
if ((std::size_t)*extra == 1) {
return static_cast<char *>(*(extra + 1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments
/// a or null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params), args_buffer(get_args_buffer(extra)) {}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i> *>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i> *>(args_buffer + get_offset<i>());
}
}
};
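/// Example usage (an illustrative sketch): extracting the arguments of the
/// kernel declared in the class documentation above from an array of argument
/// pointers. The variables \p x_val and \p n_val are assumptions made for
/// this example.
/// \code
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f = .1f);
/// sycl::float2 *x_val = nullptr;
/// int n_val = 42;
/// void *params[] = {&x_val, &n_val};
/// dpct::args_selector<2, 1, decltype(foo)> selector(params, nullptr);
/// sycl::float2 *&x = selector.get<0>();
/// int &n = selector.get<1>();
/// \endcode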
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8-bit/16-bit
/// channel width will be 32 bits. sycl::half is an exception.
template <class T>
struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t> : public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>> : public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T>
struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T>
struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>> : public fetch_data<sycl::vec<T, 4>> {};
template <class T>
struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create an image wrapper with the given type \p T and \p dims.
template <class T>
static image_wrapper_base *create_image_wrapper(int dims);
/// Create an image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create an image wrapper with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel,
int dims);
} // namespace detail
/// Image channel info, including channel number, order, data width and type.
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T>
static image_channel create() {
image_channel channel;
channel.set_channel_size(
detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) * 8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
  /// \param g Channel g width in bits. Should be the same as \p r, or zero.
  /// \param b Channel b width in bits. Should be the same as \p g, or zero.
  /// \param a Channel a width in bits. Should be the same as \p b, or zero.
  /// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Channels number to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num, unsigned channel_size) {
if (in_channel_num < _channel_num) return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
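/// Example usage (an illustrative sketch): describing a 4-channel 32-bit
/// floating-point image channel, equivalent to
/// image_channel::create<sycl::float4>().
/// \code
/// dpct::image_channel ch(32, 32, 32, 32, dpct::image_channel_data_type::fp);
/// // ch.get_channel_type()  == sycl::image_channel_type::fp32
/// // ch.get_channel_order() == sycl::image_channel_order::rgba
/// \endcode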
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions>
void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i) _range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data) std::free(_host_data);
_host_data = nullptr;
}
};
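/// Example usage (an illustrative sketch): allocating host storage for a
/// 512x512 single-channel float matrix and viewing it as pitched data. The
/// sizes are assumptions made for this example.
/// \code
/// dpct::image_matrix mat(sycl::image_channel_type::fp32, /*channel_num=*/1,
///                        /*x=*/512, /*y=*/512);
/// dpct::pitched_data pd = mat.to_pitched_data();
/// \endcode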
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) { return _channel.set_channel_num(num); }
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
  size_t _x = 0, _y = 0, _pitch = 0;
image_channel _channel;
};
/// Image sampling info, including addressing mode, filtering mode and
/// normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode = sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) {
_addressing_mode = addressing_mode;
}
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) {
_filtering_mode = filtering_mode;
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
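/// Example usage (an illustrative sketch): configuring linear filtering with
/// repeat addressing and normalized coordinates, then materializing a
/// sycl::sampler.
/// \code
/// dpct::sampling_info info;
/// info.set(sycl::addressing_mode::repeat, sycl::filtering_mode::linear,
///          /*is_normalized=*/1);
/// sycl::sampler s = info.get_sampler();
/// \endcode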
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) { _sampling_info = info; }
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) { return _data.set_channel_num(num); }
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray>
class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false>
class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) ==
detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T,
IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh,
sycl::queue &q = get_default_queue()) {
if (!_image) create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image) delete _image;
_image = nullptr;
}
};
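/// Example usage (an illustrative sketch): wrapping linear device data in a 1D
/// image and sampling it in a kernel. The queue \p q and the float pointer
/// \p dev_ptr holding \p n elements are assumptions made for this example.
/// \code
/// dpct::image_wrapper<float, 1> tex;
/// tex.attach(dev_ptr, n * sizeof(float));
/// q.submit([&](sycl::handler &cgh) {
///   dpct::image_accessor_ext<float, 1> acc(tex.get_sampler(),
///                                          tex.get_access(cgh, q));
///   cgh.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
///     float v = acc.read((int)i[0]);
///     (void)v;
///   });
/// });
/// \endcode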
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value &&
std::is_integral<Coord1>::value &&
std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value &&
std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions>
class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x,
int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(_img_acc[index].read(x, _sampler));
}
};
/// Create an image wrapper according to image data and sampling info.
/// \param data Image data used to create the image wrapper.
/// \param info Image sampling info used to create the image wrapper.
/// \returns Pointer to the base class of the created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
namespace detail {
/// Create an image wrapper with the given type \p T and \p dims.
template <class T>
static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create an image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num,
int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create an image wrapper with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel,
int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(),
dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(),
dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <windows.h>
#include <unordered_set>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <fstream>
#include "image.hpp"
#include <random>
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info get_kernel_function_info(
const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to a temporary file and return the absolute path to it.
/// The temporary file is created in a temporary directory, both of which have
/// random names, with only the user having access permissions. Only one
/// temporary file will be created in the temporary directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec) throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts) throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec) throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec) throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good()) throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec) throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof()) break;
if (c != data[cnt++]) mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
  // Analyze PE-header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr) throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
  // A Windows DLL cannot be deleted while in use, but a loaded shared
  // object can, so the temporary directory is removed immediately here.
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load kernel library whose image is already in memory and return a handle to
/// use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function get_kernel_function(
kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr) throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
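/// Example usage (an illustrative sketch): loading a kernel library produced
/// ahead of time, invoking a kernel, and unloading the library. The file name
/// "my_kernels.so" and the kernel name "vector_add" are assumptions made for
/// this example.
/// \code
/// dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.so");
/// dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");
/// void *args[] = {/* pointers to the kernel arguments */};
/// dpct::invoke_kernel_function(fn, dpct::get_default_queue(),
///                              sycl::range<3>(1, 1, 16),
///                              sycl::range<3>(1, 1, 64),
///                              /*localMemSize=*/0, args, nullptr);
/// dpct::unload_kernel_library(lib);
/// \endcode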
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p get_image_wrapper(
dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr) throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <limits.h>
#include <math.h>
#include <iostream>
#include <sycl/sycl.hpp>
template <class... Args>
class dpct_kernel_name;
template <int Arg>
class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct {
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
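// Example usage (an illustrative sketch): wrapping a SYCL operation so that a
// failure is reported as dpct::default_error instead of an escaping
// exception. The queue `q` and the pointers `dst`/`src` of `n` bytes are
// assumptions made for this example.
//
//   dpct::error_code err = DPCT_CHECK_ERROR(q.memcpy(dst, src, n).wait());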
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <algorithm>
#include <list>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <unordered_map>
#include "device.hpp"
#include "lib_common_utils.hpp"
#include "memory.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t to_dpct_library_data_t(
::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
/// \param [in] t Number of sequence length.
/// \param [in] n Number of batch.
/// \param [in] c Height of input channel.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n,
int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
  /// \param [in] dims Array of dimension ndims that contain the size of each
  /// memory dimension.
  /// \param [in] strides Array of dimension ndims that contain the stride of
  /// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
/// \param [out] t Number of sequence length.
/// \param [out] n Number of batch.
/// \param [out] c Height of input channel.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from an ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contains the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from an ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from an ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const { return _desc.get_strides(); }
/// Getting the element number of an ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const { return bool(_desc); }
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
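// Illustrative usage sketch (not part of the original header): describing a
// densely packed 4D NCHW float tensor. The enumerator
// dpct::library_data_t::real_float is assumed to be available from the dpct
// common utilities.
//
//   memory_desc_ext desc;
//   int n = 1, c = 3, h = 224, w = 224;
//   desc.set(dpct::library_data_t::real_float, n, c, h, w,
//            /*n_stride=*/c * h * w, /*c_stride=*/h * w,
//            /*h_stride=*/w, /*w_stride=*/1);
//   size_t bytes = desc.get_size();        // n * c * h * w * sizeof(float)
//   size_t elems = desc.get_element_num(); // n * c * h * w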
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter. For
/// ::dnnl::algorithm::eltwise_clip this value is stored as the clip upper
/// bound (beta).
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if (alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
/// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if (_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \returns Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \returns Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \returns Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
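// Illustrative sketch: configuring a ReLU activation. As set() above shows,
// passing ::dnnl::algorithm::eltwise_clip instead would store the value as
// the clip upper bound (beta).
//
//   activation_desc adesc;
//   adesc.set(::dnnl::algorithm::eltwise_relu, 0.f);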
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
/// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \returns Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \returns Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \returns Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \returns Value of k parameter.
float get_k() const { return _k; }
};
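// Illustrative sketch: an AlexNet-style LRN configuration
// (local_size = 5, alpha = 1e-4, beta = 0.75, k = 2).
//
//   lrn_desc ldesc;
//   ldesc.set(5, 1e-4f, 0.75f, 2.f);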
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting an ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from an ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \returns Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension ndims that contains the size of
/// each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
    assert(ndims >= 4 && "ndims must be at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
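// Illustrative sketch: 2x2 max pooling with stride 2 and no padding, then
// querying the output shape for an input descriptor (src_desc is a
// hypothetical 4D NCHW memory_desc_ext).
//
//   pooling_desc pdesc;
//   pdesc.set(::dnnl::algorithm::pooling_max, /*kernel_h=*/2, /*kernel_w=*/2,
//             /*padding_h=*/0, /*padding_w=*/0, /*stride_h=*/2,
//             /*stride_w=*/2);
//   int out_n, out_c, out_h, out_w;
//   pdesc.get_forward_output_dim(src_desc, &out_n, &out_c, &out_h, &out_w);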
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() const { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
/// Getting floating point math mode specified in the given convolution
/// descriptor.
/// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() const { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate (1-based; stored
/// internally as dilate_h - 1 to match the oneDNN convention).
/// \param [in] dilate_w Value of width of dilate (1-based; stored
/// internally as dilate_w - 1 to match the oneDNN convention).
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting an ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
/// \param [in] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [in] strides Array of dimension ndims containing the stride size
/// of each dimension.
/// \param [in] dilates Array of dimension ndims containing the dilate size
/// of each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from an ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
/// \param [out] ndims Dimension of the convolution operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] strides Array of dimension ndims containing the stride size
/// of each dimension.
/// \param [out] dilates Array of dimension ndims containing the dilate size
/// of each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
/// Getting the dilate parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the dilate size of each
/// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
*out_h = 1 + (dims[2] + 2 * _paddings[0] -
(1 + (_dilates[0] * (weight_dims[2] - 1)))) /
_strides[0];
*out_w = 1 + (dims[3] + 2 * _paddings[1] -
(1 + (_dilates[1] * (weight_dims[3] - 1)))) /
_strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [in] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension ndims that contains the size of
/// each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
    assert(ndims >= 4 && "ndims must be at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
    // Output channel count comes from the number of filters (weight dim 0),
    // matching the 2D variant above.
    out_dims[1] = weight_dims[0];
for (int i = 2; i < ndims; i++) {
out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
(1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) /
_strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0 && _dilates.size() == 0 &&
_paddings.size() == 0);
}
};
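// Illustrative sketch: a 3x3 convolution with padding 1, stride 1, and no
// extra dilation (dilate = 1 in the 1-based convention accepted by set()),
// then computing the output shape (src_desc and weight_desc are hypothetical
// 4D memory descriptors).
//
//   convolution_desc cdesc;
//   cdesc.set(/*padding_h=*/1, /*padding_w=*/1, /*stride_h=*/1,
//             /*stride_w=*/1, /*dilate_h=*/1, /*dilate_w=*/1);
//   int out_n, out_c, out_h, out_w;
//   cdesc.get_forward_output_dim(src_desc, weight_desc, &out_n, &out_c,
//                                &out_h, &out_w);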
/// An enum class representing RNN mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing RNN bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing RNN direction.
enum class rnn_direction { unidirectional, bidirectional };
/// A class holding description for an RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
/// Setting an RNN descriptor with given parameters.
/// \param [in] mode RNN mode.
/// \param [in] bias_mode RNN bias mode.
/// \param [in] direction RNN direction.
/// \param [in] dt Data type.
/// \param [in] input_size Size of the input vector.
/// \param [in] hidden_size Size of the hidden state.
/// \param [in] projection_size Size of the projection.
/// \param [in] layer_size Number of stacked RNN layers.
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
/// Getting parameters from an RNN descriptor.
/// \param [out] mode RNN mode.
/// \param [out] bias_mode RNN bias mode.
/// \param [out] direction RNN direction.
/// \param [out] dt Data type.
/// \param [out] input_size Size of the input vector.
/// \param [out] hidden_size Size of the hidden state.
/// \param [out] projection_size Size of the projection.
/// \param [out] layer_size Number of stacked RNN layers.
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
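// Illustrative sketch: a two-layer unidirectional LSTM with input size 128
// and hidden size 256 (passing 0 for projection_size is assumed to mean
// projection is unused).
//
//   rnn_desc rdesc;
//   rdesc.set(rnn_mode::lstm, rnn_bias_mode::single,
//             rnn_direction::unidirectional,
//             dpct::library_data_t::real_float, /*input_size=*/128,
//             /*hidden_size=*/256, /*projection_size=*/0, /*layer_size=*/2);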
/// A class holding description for a dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const { return bool(_imp); }
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init() { _imp = std::make_shared<dropout_desc_imp>(); }
/// Setting a dropout descriptor with given parameters.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
/// Getting parameters from a dropout descriptor.
/// \param [out] p Probability of value set to zero.
/// \param [out] states Pointer to the memory that stores the random
/// generator state.
/// \param [out] seed Seed used to initialize conditions of the generator
/// state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
/// Getting the probability of value set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
/// Restoring a dropout descriptor from stored state.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
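// Illustrative sketch (engine, state, and state_size are hypothetical and
// come from the surrounding application; set() is defined elsewhere in this
// header's implementation):
//
//   dropout_desc ddesc;
//   ddesc.init();
//   ddesc.set(engine, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
//   float p = ddesc.get_probability(); // 0.5f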
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses an LRU replacement policy; the default cache
// capacity is 1024 entries.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
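// Illustrative sketch of the cache contract above (key, prim, deleter, and
// e are hypothetical): get() returns nullptr on a miss and refreshes the
// entry's LRU position on a hit; put() of a new key at capacity evicts and
// asynchronously destroys the least recently used primitive.
//
//   detail::primitive_cache cache;
//   if (::dnnl::primitive *p = cache.get(key)) {
//     // Reuse the cached primitive.
//   } else {
//     cache.put(key, prim, deleter, e);
//   }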
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc);
::dnnl::memory::desc get_bn_scale_bias_mean_var_desc(
const ::dnnl::memory::desc &desc, batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc);
::dnnl::memory::desc bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc transfer_memory_desc_to_format_tag_any(
const ::dnnl::memory::desc &desc) {
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc create_primitive_desc(
args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool scale_parameter_preprocess(
const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T>
struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(
int group_count, const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const { return bool(_eng) && bool(_s) && bool(_q); }
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
/// Creating oneDNN engine.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
/// Setting the user's SYCL queue for a oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
      throw std::runtime_error(
          "set_queue: queue does not match the current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
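// Illustrative sketch: creating the engine on the current device, then
// optionally rebinding it to a caller-owned queue (my_queue is hypothetical
// and must share the engine's context, or set_queue() throws):
//
//   engine_ext engine;
//   engine.create_engine();
//   engine.set_queue(&my_queue);
//   sycl::queue *q = engine.get_queue();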
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
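// Illustrative sketch (src and dst are hypothetical device-accessible
// buffers matching src_desc and dst_desc): ReLU with alpha = 1 and
// beta = 0, so the prior destination contents are not blended in.
//
//   activation_desc adesc;
//   adesc.set(::dnnl::algorithm::eltwise_relu, 0.f);
//   engine.activation_forward(adesc, 1.f, src_desc, src, 0.f, dst_desc,
//                             dst);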
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value in
/// the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc,
void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Performing a specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Performing a specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value in
/// the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode);
/// Get the size of the workspace needed by batch normalization. The data
/// stored in the workspace must be preserved between forward and backward
/// propagation.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
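// Illustrative sketch: deriving the scale/bias/mean/variance descriptor for
// spatial batch normalization and querying the workspace size when no fused
// activation is requested (src_desc is hypothetical):
//
//   memory_desc_ext sbmv_desc;
//   engine_ext::derive_batch_normalization_memory_desc(
//       sbmv_desc, src_desc, batch_normalization_mode::spatial);
//   size_t ws_size = engine.get_batch_normalization_workspace_size(
//       batch_normalization_ops::none, src_desc);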
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
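// Note: as implemented in batch_normalization_forward_internal below, the
// running statistics are blended as
//   running_mean = (1 - factor) * running_mean + factor * batch_mean
//   running_var  = (1 - factor) * running_var
//                  + factor * (m / (m - 1)) * batch_var
// where m is the number of elements reduced per channel; the m/(m-1) term
// converts the biased batch variance into an unbiased estimate.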
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean,
void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Scaling factor applied to the computed data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Scaling factor applied to the prior value in the
/// data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Scaling factor applied to the computed parameter
/// value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Scaling factor applied to the prior value in the
/// parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Scaling factor applied to the computed data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Scaling factor applied to the prior value in the
/// data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Scaling factor applied to the computed parameter
/// value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Scaling factor applied to the prior value in the
/// parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set
/// to perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Scaling factor applied to the computed data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Scaling factor applied to the prior value in the
/// data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Scaling factor applied to the computed parameter
/// value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Scaling factor applied to the prior value in the
/// parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in
/// forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in
/// forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward
/// operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean,
void *saved_var, size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
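// A minimal convolution-forward sketch (comment only; all names are
// hypothetical, pre-initialized objects):
//
//   auto e = engine.async_convolution_forward(
//       conv_desc, ::dnnl::algorithm::convolution_direct,
//       /*alpha=*/1.f, src_desc, src, weight_desc, weight,
//       /*beta=*/0.f, dst_desc, dst);
//   e.wait();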
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Scaling factor applied to the data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Scaling factor applied to the summand value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Scaling factor applied to the computed value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Scaling factor applied to the prior value in the
/// destination memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias);
/// Getting the required weight space size for a specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for a specified
/// rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc,
::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size,
size_t *workspace_size);
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc,
void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
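// A typical call sequence (comment only; names are hypothetical): query the
// weight, scratchpad and workspace sizes first, allocate device memory of
// those sizes, then launch the forward pass.
//
//   size_t weight_size, scratchpad_size, workspace_size;
//   engine.rnn_get_weight_space_size(desc, &weight_size);
//   engine.rnn_get_scratchpad_workspace_size(
//       desc, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad_size, &workspace_size);
//   // ... allocate weight/scratchpad/workspace with sycl::malloc_device ...
//   auto e = engine.async_rnn_forward(
//       desc, ::dnnl::prop_kind::forward_training, src_desc, src, dst_desc,
//       dst, iter_desc, src_iter, dst_iter, iter_c_desc, src_iter_c,
//       dst_iter_c, weight_size, weight, scratchpad_size, scratchpad,
//       workspace_size, workspace);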
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden
/// state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden
/// state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell
/// state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell
/// state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for a specified dropout operation.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for a dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
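// A dropout round-trip sketch (comment only; names are hypothetical): the
// same workspace buffer must be passed to the forward and backward calls,
// and its size comes from get_dropout_workspace_size(src_desc).
//
//   size_t ws_size = engine.get_dropout_workspace_size(src_desc);
//   void *ws = sycl::malloc_device(ws_size, *engine.get_queue());
//   engine.async_dropout_forward(drop_desc, src_desc, src, dst_desc, dst,
//                                ws, ws_size);
//   engine.async_dropout_backward(drop_desc, diff_dst_desc, diff_dst,
//                                 diff_src_desc, diff_src, ws, ws_size);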
};
inline void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
    size_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error(
"restore: state_size less than required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size)
.wait();
_imp->_rng_engine = oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
    size_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("set: no sufficient memory to save states.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size)
.wait();
}
#endif
}
inline ::dnnl::memory::data_type memory_desc_ext::to_dnnl_data_type(
dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline dpct::library_data_t memory_desc_ext::to_dpct_library_data_t(
::dnnl::memory::data_type dt, unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error(
"to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline ::dnnl::memory::format_tag memory_desc_ext::to_dnnl_format_tag(
dpct::library_data_t dt, memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int n, int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h,
int w, int n_stride, int c_stride,
int h_stride, int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline void memory_desc_ext::set(dpct::library_data_t dt, int ndims,
const int dims[], const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline void memory_desc_ext::set(rnn_memory_format_tag tag,
dpct::library_data_t dt, int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if (tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
inline void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c,
int *h, int *w, int *n_stride, int *c_stride,
int *h_stride, int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline void memory_desc_ext::get(dpct::library_data_t *dt,
memory_format_tag *tag, int *n, int *c, int *h,
int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline void memory_desc_ext::get(dpct::library_data_t *dt,
rnn_memory_format_tag *tag, int *t, int *n,
int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] = astrides[index] / block_size;
}
}
inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 && adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
inline void engine_ext::get_rnn_configuration(
const ::dnnl::memory::desc &desc, rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt, ::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size, int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
inline void *engine_ext::allocate(const memory_desc_ext &data_desc,
int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
inline void engine_ext::transform_no_zero(const memory_desc_ext &desc,
void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
inline ::dnnl::memory::desc engine_ext::get_group_weight_desc(
int group_count, const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
throw std::runtime_error(
"get_group_weight_desc: group convolution with "
"blocked weight memory unimplemented.");
}
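  // For example (hypothetical shapes), a weight of dims {O, I, H, W} =
  // {64, 16, 3, 3} with group_count = 4 is reshaped below to the grouped
  // layout {G, O/G, I, H, W} = {4, 16, 16, 3, 3}.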
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
for (int index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
  } else if (ndims == 5) {
    if (is_nhwc) {
      tag = ::dnnl::memory::format_tag::godhwi;
    } else {
      tag = ::dnnl::memory::format_tag::goidhw;
    }
  } else {
    throw std::runtime_error("get_group_weight_desc: only 4d and 5d weight "
                             "memory descriptors are supported.");
  }
help_weight_desc = ::dnnl::memory::desc(
new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
inline ::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
assert(ndims >= 4 && "ndims is at least 4.");
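  // All spatial extents are folded into the channel dimension; e.g. a
  // (hypothetical) {N, C, H, W} = {2, 16, 4, 4} source becomes
  // {2, 16 * 4 * 4, 1, 1} = {2, 256, 1, 1}.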
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
inline ::dnnl::memory::desc engine_ext::get_bn_scale_bias_mean_var_desc(
const ::dnnl::memory::desc &desc, batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
assert(ndims >= 4 && "ndims is at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
inline ::dnnl::memory::desc
engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, then the destination (dst = alpha * out +
/// beta * prior_dst) is unchanged. In this case the function returns true,
/// meaning the operation can exit directly.
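/// For example, alpha = 1 and beta = 0 simply overwrites dst; alpha = 1 and
/// beta = 1 accumulates into dst; and alpha = 0 with beta != 1 reduces to a
/// plain scale of the prior dst (handled via async_scale below).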
inline bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
inline void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
throw std::runtime_error(
"derive_batch_normalization_memory_desc: only 4d "
"and 5d memory descriptor supported.");
}
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
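  // When an output uses beta != 0, the primitive writes into a temporary
  // cache buffer; async_sum afterwards blends it into the user's data as
  // alpha * cache + beta * data. With beta == 0 the primitive writes the
  // user's buffer directly and only an optional alpha scale remains.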
std::vector<void *> caches;
int output_arg_num = output_args.size();
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta,
output_args[i]._desc, output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
inline ::dnnl::memory::desc
engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
inline sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc,
bias, &reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias,
caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_var, &reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive = create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift,
forward_primitive);
void *dst_cache = nullptr;
if (!saved_mean && !saved_var) {
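    // No saved statistics from the forward pass were supplied, so rerun a
    // forward-training pass here to recompute the batch mean/variance that
    // the backward primitive requires; the dst produced is a throwaway.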
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
if (!bias) {
      _q->memset(reordered_bias, 0, diff_scale_bias_desc.get_size());
}
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean : saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
{DNNL_ARG_DIFF_DST,
       {::dnnl::memory(help_diff_dst_desc, _eng,
reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias,
0.f, diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean, &reordered_saved_mean,
caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_var, &reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive = create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean : saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(
help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
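    // Blend the batch variance into the running variance with an m/(m-1)
    // correction (Bessel's correction), so the running estimate is unbiased:
    // running_var = (1 - factor) * running_var + factor * unbias * batch_var.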
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var, 1.f,
mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter,
dst_iter, src_iter_c, dst_iter_c,
weight, workspace, scratchpad};
std::vector<int> offset(6, 0);
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
    // oneDNN bidirectional RNNs support either summed or concatenated
    // outputs per primitive, so combine the two configs: run
    // bidirectional_sum over the lower layers and bidirectional_concat
    // over the last one, calling execute_rnn_forward_primitive twice.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
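// Creates the oneDNN descriptors for one RNN configuration. When the
// weight_size/workspace_size/scratchpad_size pointers are non-null the
// function only accumulates size queries; otherwise it builds the forward
// primitive and executes it iter_num times, walking the packed weight/bias
// blob through the offset vector. In training mode the src/dst/iter
// outputs of each iteration are appended to the workspace so the backward
// pass can replay them.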
inline sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD,
{::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
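// Backward counterpart of execute_rnn_forward_primitive. The forward
// primitive descriptor is recreated as a hint for the backward descriptor,
// and the packed weight/workspace blobs are walked backwards: note that
// insert_args below advances the offset before binding memory at
// (data - offset), mirroring the forward layout in reverse.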
inline sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD,
{::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
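// Cache-key generators. RNN and convolution primitive descriptors carry
// state that the generic generate_cache_key below does not serialize
// (cell kind, direction, strides, dilations, group size, ...), so the two
// macros that follow emit dedicated specializations for them.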
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
  case ::dnnl::primitive::kind::batch_normalization:
    ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
    [[fallthrough]]; // batch_normalization also serializes get_p().
  case ::dnnl::primitive::kind::reduction:
    ss << pd.get_p();
    break;
  case ::dnnl::primitive::kind::eltwise:
    ss << pd.get_alpha() << pd.get_beta();
    [[fallthrough]]; // eltwise also serializes get_k().
  case ::dnnl::primitive::kind::lrn:
    ss << pd.get_k();
    break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
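// Primitive creation helpers. A minimal sketch of the cache protocol they
// implement (same identifiers as below; illustrative only):
//
//   auto pd = create_primitive_desc<::dnnl::eltwise_forward>(args...);
//   auto key = generate_cache_key(pd);            // descriptor fingerprint
//   auto *p = (::dnnl::eltwise_forward *)_primitive_cache.get(key);
//   if (!p)
//     p = new ::dnnl::eltwise_forward(pd);        // cache miss: build once
//   // {key, p} is then handed to execute_primitive, which runs the
//   // primitive and is responsible for returning it to the cache.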
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc engine_ext::create_primitive_desc(
args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
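// Synchronous wrappers: each of the following simply forwards to its
// async_* counterpart and blocks on the returned event.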
inline void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline void engine_ext::sum(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace)
.wait();
}
inline void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline void engine_ext::softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline void engine_ext::softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline void engine_ext::lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc,
void *src, const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
  size_t mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
inline sycl::event engine_ext::async_reorder(float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
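// In-place scale: src = alpha * src. Implemented as eltwise_linear with
// beta = 0, reading from a temporary copy of src so oneDNN sees distinct
// source and destination buffers; alpha == 1 is a no-op.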
inline sycl::event engine_ext::async_scale(float alpha,
const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
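// Weighted accumulation: dst = alpha * src + beta * dst. The current dst
// is snapshotted into dst_cache so it can serve as the second source of
// the ::dnnl::sum primitive. The cache key is assembled by hand because
// the generic generate_cache_key does not cover ::dnnl::sum.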
inline sycl::event engine_ext::async_sum(float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
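// Elementwise binary: dst = op(alpha_0 * src_0, alpha_1 * src_1)
// + beta * dst. The unary ops sqrt and neg are routed through eltwise
// primitives instead of ::dnnl::binary.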
inline sycl::event engine_ext::async_binary(
binary_op op, float alpha_0, const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1, void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
    // eltwise_linear with alpha = -1 and beta = 1 computes dst = 1 - src,
    // emulating neg; eltwise_sqrt ignores both parameters.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
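// Reduction of src into dst. amax is emulated as eltwise_abs followed by
// reduction_max; mul_no_zeros first rewrites zeros via transform_no_zero;
// norm1 and norm2 select the lp-norm algorithms with p = 1 and p = 2.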
inline sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
case reduction_op::amax:
cache = allocate(src_desc);
activation_desc adesc;
adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_max;
src = cache;
break;
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_activation_forward(
activation_desc &desc, float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
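// Pooling (and LRN) forward primitives produce a oneDNN workspace holding
// the selected indices. If the caller does not supply a workspace, it is
// stashed keyed by the src pointer via insert_workspace() and recovered
// in the matching backward call with get_workspace(src).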
inline sycl::event engine_ext::async_pooling_forward(
pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc = create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
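// Softmax: in instance mode the spatial dimensions are folded into the
// channel axis (compress_spatial_dimensions_to_channel) so normalization
// runs over a single logical axis, axis 1 below.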
inline sycl::event engine_ext::async_softmax_forward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc, help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline sycl::event engine_ext::async_lrn_forward(
lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline sycl::event engine_ext::async_lrn_backward(
lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
if (ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
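// Batch-normalization entry points. The fused variants (ops != none)
// stage the normalized result in a scratch buffer, apply the optional
// summand addition and activation, then blend into dst with alpha/beta.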
inline sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
inline sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
if (workspace_size < dst_desc.get_desc().get_size()) {
      throw std::runtime_error(
          "async_batch_normalization_forward_training_ex: "
          "insufficient workspace.");
}
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc, saved_mean,
saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace, beta,
dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
    throw std::runtime_error(
        "async_batch_normalization_backward_ex: "
        "insufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace, 0.f, diff_dst_desc,
diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
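// Convolution forward lets oneDNN pick optimal layouts: src/dst/weight
// descriptors are rewritten with format_tag::any, the chosen layouts are
// queried from the primitive descriptor, and user tensors are reordered
// in and out as needed.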
inline sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS,
{::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}}, input_caches);
if (origin_dst_md != optimal_dst_md) {
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md,
dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
inline sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc,
dst);
}
inline sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto,
diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto,
src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst,
beta, diff_bias_desc, diff_bias);
}
inline void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr,
true, weight_space_size, nullptr, nullptr);
return;
}
inline void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr, nullptr);
}
inline sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(
diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache : hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline size_t engine_ext::get_dropout_state_size() {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if (_random_engine_state_size == -1) {
if (_q) {
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t engine_ext::get_dropout_workspace_size(
const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline sycl::event engine_ext::async_dropout_forward(
dropout_desc &desc, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
inline sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace,
size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
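// Illustrative usage sketch (not part of the original header): pairing the
// dropout forward and backward calls above through a shared workspace. The
// helper name and all pointers are hypothetical; `handle` is assumed to be an
// engine_ext already bound to a queue/engine, and `workspace_size` must be at
// least get_dropout_workspace_size(data_desc).
inline sycl::event dropout_round_trip_example(
    engine_ext &handle, dropout_desc &desc, const memory_desc_ext &data_desc,
    void *src, void *dst, void *diff_dst, void *diff_src, void *workspace,
    size_t workspace_size) {
  // The forward pass stores the scaled mask in `workspace`...
  handle.async_dropout_forward(desc, data_desc, src, data_desc, dst, workspace,
                               workspace_size);
  // ...which the backward pass reuses to mask the incoming gradient.
  return handle.async_dropout_backward(desc, data_desc, diff_dst, data_desc,
                                       diff_src, workspace, workspace_size);
}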
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/lapack_utils.hpp | //==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "lib_common_utils.hpp"
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be
/// solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
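// Illustrative usage sketch (not part of the original header): solving the
// real generalized eigenproblem A*x = lambda*B*x (itype = 1) in single
// precision. The helper name and pointers are hypothetical; `scratchpad` is
// assumed to hold at least `scratchpad_size` floats, and `queue` to be
// in-order when the USM mode is used.
inline int sygvd_example(sycl::queue &queue, int n, float *a, float *b,
                         float *w, float *scratchpad, int scratchpad_size,
                         int *info) {
  // Request eigenvectors and reference the lower triangles of A and B.
  return sygvd<float>(queue, /*itype=*/1, oneapi::mkl::job::vec,
                      oneapi::mkl::uplo::lower, n, a, /*lda=*/n, b, /*ldb=*/n,
                      w, scratchpad, scratchpad_size, info);
}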
/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be
/// solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda,
(Ty *)b, ldb, w, (Ty *)scratchpad,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad) sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
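// Illustrative usage sketch (not part of the original header): factor a batch
// of SPD matrices with potrf_batch, then solve with potrs_batch against the
// same pointer arrays. The helper name is hypothetical; `a` and `b` are
// assumed to be arrays of `group_size` device-accessible matrix pointers.
template <typename T>
inline int cholesky_solve_batch_example(sycl::queue &queue, int n, int nrhs,
                                        T *a[], int lda, T *b[], int ldb,
                                        int *info, int group_size) {
  // Factor every A_i in place (lower-triangular storage).
  if (potrf_batch(queue, oneapi::mkl::uplo::lower, n, a, lda, info,
                  group_size))
    return 1;
  // Reuse the factors to solve A_i * X_i = B_i for each batch member.
  return potrs_batch(queue, oneapi::mkl::uplo::lower, n, nrhs, a, lda, b, ldb,
                     info, group_size);
}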
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info) dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
template <typename T>
class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() { return _ptr; }
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
template <typename T>
struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T>
struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T>
struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T>
struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T>
struct ElementType {
  using value_type = T;
};
template <typename T>
struct ElementType<std::complex<T>> {
  using value_type = T;
};
template <typename T>
struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto s_data = dpct::detail::get_memory(
        reinterpret_cast<typename ElementType<T>::value_type *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T>
struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T>
struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T>
struct value_type_trait {
using value_type = T;
};
template <typename T>
struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T>
auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T>
struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
template <typename T>
constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T>
struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T>
struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T>
struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T>
struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T>
struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
inline oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T>
struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T>
struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T>
struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T>
struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T>
struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T>
struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
/// Solves a system of linear equations with a LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The input matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
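// Illustrative usage sketch (not part of the original header): LU-factor a
// square matrix and solve A*X = B with it, chaining the three APIs above. The
// helper name is hypothetical; `a`, `b`, `ipiv`, and `info` are assumed to be
// device-accessible and sized for an n-by-n system with nrhs right-hand sides.
inline int lu_solve_example(sycl::queue &q, std::int64_t n, std::int64_t nrhs,
                            library_data_t type, void *a, std::int64_t lda,
                            std::int64_t *ipiv, void *b, std::int64_t ldb,
                            int *info) {
  std::size_t ws_bytes = 0;
  if (getrf_scratchpad_size(q, n, n, type, lda, &ws_bytes)) return 1;
  void *ws = sycl::malloc_device(ws_bytes, q);
  // Factor A = P*L*U; `ipiv` receives the pivot indices.
  int ret = getrf(q, n, n, type, a, lda, ipiv, ws, ws_bytes, info);
  if (!ret)
    // Back-substitute using the factors left in `a`.
    ret = getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs, type, a, lda,
                ipiv, type, b, ldb, info);
  q.wait();
  sycl::free(ws, q);
  return ret;
}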
/// Computes the size of workspace memory of geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization
/// data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The array contains scalars that define elementary
/// reflectors for the matrix Q in its decomposition in a product of
/// elementary reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
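// Illustrative usage sketch (not part of the original header): QR-factor an
// m-by-n matrix with the two APIs above. The helper name is hypothetical;
// `a` and `tau` are assumed to be device-accessible, with `tau` holding at
// least min(m, n) elements of the same type as `a`.
inline int qr_factor_example(sycl::queue &q, std::int64_t m, std::int64_t n,
                             library_data_t type, void *a, std::int64_t lda,
                             void *tau, int *info) {
  std::size_t ws_bytes = 0;
  if (geqrf_scratchpad_size(q, m, n, type, lda, &ws_bytes)) return 1;
  void *ws = sycl::malloc_device(ws_bytes, q);
  // On success `a` holds R above the diagonal and the reflectors below it.
  int ret = geqrf(q, m, n, type, a, lda, type, tau, ws, ws_bytes, info);
  q.wait();
  sycl::free(ws, q);
  return ret;
}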
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
  *device_ws_size = (int)device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition (SVD) of a general matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. It is overwritten according to
/// \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
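// Usage sketch (illustrative only; `q` and the USM buffers `a`, `s`, `u`,
// `vt` are assumed to be allocated by the caller): querying the workspace
// size and then computing a full single-precision SVD:
//
//   std::int64_t m = 6, n = 4, lda = 6, ldu = 6, ldvt = 4;
//   std::size_t ws_bytes = 0;
//   dpct::lapack::gesvd_scratchpad_size(
//       q, 'A', 'A', m, n, dpct::library_data_t::real_float, lda,
//       dpct::library_data_t::real_float, ldu,
//       dpct::library_data_t::real_float, ldvt, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int info = 0;
//   dpct::lapack::gesvd(q, 'A', 'A', m, n, dpct::library_data_t::real_float,
//                       a, lda, dpct::library_data_t::real_float, s,
//                       dpct::library_data_t::real_float, u, ldu,
//                       dpct::library_data_t::real_float, vt, ldvt, ws,
//                       ws_bytes, &info);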
/// Computes the singular value decomposition (SVD) of a general matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. It is overwritten according to
/// \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  return detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
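// Usage sketch (illustrative only; `q` is an assumed in-order queue): the
// scratchpad-size query and the factorization are meant to be paired, with
// the byte count from the former passed straight to the latter:
//
//   std::int64_t n = 4, lda = 4;
//   double *a = sycl::malloc_shared<double>(lda * n, q);  // fill with an SPD matrix
//   std::size_t ws_bytes = 0;
//   dpct::lapack::potrf_scratchpad_size(q, oneapi::mkl::uplo::lower, n,
//                                       dpct::library_data_t::real_double,
//                                       lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int info = 0;
//   dpct::lapack::potrf(q, oneapi::mkl::uplo::lower, n,
//                       dpct::library_data_t::real_double, a, lda, ws,
//                       ws_bytes, &info);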
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The Cholesky factor U or L of the matrix A, as specified by
/// \p uplo (e.g. as computed by potrf).
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations. Overwritten by the solution matrix X.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
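// Usage sketch (illustrative only): solving A * X = B with the factor
// produced by the potrf flow sketched above (`q`, `a`, `lda`, `n` carry
// over; `b` is an assumed USM buffer holding `nrhs` right-hand sides):
//
//   std::int64_t nrhs = 2, ldb = 4;
//   int info = 0;
//   dpct::lapack::potrs(q, oneapi::mkl::uplo::lower, n, nrhs,
//                       dpct::library_data_t::real_double, a, lda,
//                       dpct::library_data_t::real_double, b, ldb, &info);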
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
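// Usage sketch (illustrative only; `q`, `n`, `lda`, the buffers `a`, `w`,
// `ws` and the byte count `ws_bytes` are assumptions): selecting the
// eigenvalues of a real symmetric matrix that fall inside (vl, vu]:
//
//   double vl = 0.0, vu = 10.0;
//   std::int64_t found = 0;
//   int info = 0;
//   dpct::lapack::syheevx(q, oneapi::mkl::job::vec,
//                         oneapi::mkl::rangev::values,
//                         oneapi::mkl::uplo::upper, n,
//                         dpct::library_data_t::real_double, a, lda, &vl,
//                         &vu, 0, 0, &found,
//                         dpct::library_data_t::real_double, w, ws, ws_bytes,
//                         &info);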
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
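// Usage sketch (illustrative only; buffer names are assumptions): the typed
// overloads deduce the data types from the pointer arguments, so a complex
// Hermitian solve for the three smallest eigenvalues reduces to:
//
//   int n = 16, lda = 16, ws_elems = 0, found = 0, info = 0;
//   dpct::lapack::syheevx_scratchpad_size<std::complex<float>, float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
//       oneapi::mkl::uplo::lower, n, lda, 0.f, 0.f, 1, 3, &ws_elems);
//   // ... allocate std::complex<float> *a, *ws and float *w, then:
//   dpct::lapack::syheevx(q, oneapi::mkl::job::vec,
//                         oneapi::mkl::rangev::indices,
//                         oneapi::mkl::uplo::lower, n, a, lda, 0.f, 0.f, 1,
//                         3, &found, w, ws, ws_elems, &info);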
/// Computes the size of workspace memory of sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syhegvx_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle
/// is overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
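// Usage sketch (illustrative only; `q`, `n`, `lda`, `ldb`, the double
// buffers `a`, `b`, `w`, `ws` and `ws_elems` are assumptions): itype == 1
// selects the problem A*x = lambda*B*x, with eigenvalues picked by index:
//
//   int found = 0, info = 0;
//   dpct::lapack::syhegvx(q, 1, oneapi::mkl::job::vec,
//                         oneapi::mkl::rangev::indices,
//                         oneapi::mkl::uplo::upper, n, a, lda, b, ldb, 0.0,
//                         0.0, 1, 2, &found, w, ws, ws_elems, &info);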
/// Computes the size of workspace memory of sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
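// Usage sketch (illustrative only; `q`, `n`, `lda`, `ldb` and the double
// buffers are assumptions): pairing the scratchpad query with the
// divide-and-conquer generalized eigensolver:
//
//   int ws_elems = 0, info = 0;
//   dpct::lapack::syhegvd_scratchpad_size<double>(
//       q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n, lda, ldb,
//       &ws_elems);
//   // ... allocate double *ws of ws_elems elements, then:
//   dpct::lapack::syhegvd(q, 1, oneapi::mkl::job::vec,
//                         oneapi::mkl::uplo::lower, n, a, lda, b, ldb, w, ws,
//                         ws_elems, &info);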
/// Computes the size of workspace memory of syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
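// Usage sketch (illustrative only; `q`, `n`, `lda` and the float buffers
// `a`, `w` are assumptions): a standard symmetric eigensolve with the
// workspace element count taken from syheev_scratchpad_size:
//
//   int ws_elems = 0, info = 0;
//   dpct::lapack::syheev_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda,
//       &ws_elems);
//   // ... allocate float *ws of ws_elems elements, then:
//   dpct::lapack::syheev(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
//                        n, a, lda, w, ws, ws_elems, &info);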
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
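// Usage sketch (illustrative only; `q`, `n`, `lda` and the double buffers
// `a`, `w`, `ws` are assumptions): the typed syheevd overload mirrors the
// untyped one but takes the workspace size as an element count:
//
//   int ws_elems = 0, info = 0;
//   dpct::lapack::syheevd_scratchpad_size<double>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n, lda,
//       &ws_elems);
//   dpct::lapack::syheevd(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower,
//                         n, a, lda, w, ws, ws_elems, &info);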
/// Computes the size of workspace memory of trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size) *host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
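// Usage sketch (illustrative only; `q`, `n`, `lda` and the float buffer `a`
// are assumptions): in-place inversion of an upper-triangular matrix. The
// API above throws when DPCT_USM_LEVEL_NONE is defined, so this flow
// requires USM:
//
//   std::size_t ws_bytes = 0;
//   int info = 0;
//   dpct::lapack::trtri_scratchpad_size(q, oneapi::mkl::uplo::upper,
//                                       oneapi::mkl::diag::nonunit, n,
//                                       dpct::library_data_t::real_float,
//                                       lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::upper,
//                       oneapi::mkl::diag::nonunit, n,
//                       dpct::library_data_t::real_float, a, lda, ws,
//                       ws_bytes, &info);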
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <optional>
#include <sycl/sycl.hpp>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or
/// backward.
enum fft_direction : int { forward = 0, backward };
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
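  // Usage sketch (illustrative only; `q` is an assumed sycl::queue):
  // committing a batched 1-D single-precision complex-to-complex plan and
  // reading back its workspace requirement:
  //
  //   dpct::fft::fft_engine plan;
  //   size_t ws_bytes = 0;
  //   plan.commit(&q, /*n1=*/1024,
  //               dpct::fft::fft_type::complex_float_to_complex_float,
  //               /*batch=*/8, &ws_bytes);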
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
  /// Create the class for calculating 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
  /// Create the class for calculating 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
static fft_engine *create(
sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating FFT without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
/// Destroy the class for calculating FFT.
/// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
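  // Usage sketch (added illustration): full lifecycle of a 2-D
  // single-precision C2C plan. `q`, `in`, and `out` are hypothetical;
  // `in`/`out` point to 64 * 64 sycl::float2 elements.
  //   auto *plan = dpct::fft::fft_engine::create(
  //       &q, 64, 64, dpct::fft::fft_type::complex_float_to_complex_float);
  //   plan->compute(in, out, dpct::fft::fft_direction::forward);
  //   dpct::fft::fft_engine::destroy(plan);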
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int dim, int *n, int *inembed, int istride, int idist, int *onembed,
int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If it is not set, forward direction (if current FFT is
/// complex-to-complex) and out-of-place (false) are set by default.
static void estimate_size(
int n1, fft_type type, int batch, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int n2, int n1, fft_type type, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction (if
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void estimate_size(
int n3, int n2, int n1, fft_type type, size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
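  // Illustrative sketch (assumed usage): query the estimated scratchpad size
  // for a 512 x 512 C2C FFT before committing any plan.
  //   size_t bytes = 0;
  //   dpct::fft::fft_engine::estimate_size(
  //       512, 512, dpct::fft::fft_type::complex_float_to_complex_float,
  //       &bytes, std::make_pair(dpct::fft::fft_direction::forward, false));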
#endif
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
  /// Set the user's SYCL queue for the calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
  /// Set whether to use an external or an internal workspace.
/// \param [in] flag True means using internal workspace. False means using
/// external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
#endif
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
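  // Illustrative sketch (assumed ordering): driving an external workspace.
  // `plan` and `q` are hypothetical, and use_internal_workspace(false) must
  // take effect before the plan is committed so that WORKSPACE_EXTERNAL is
  // configured.
  //   plan->use_internal_workspace(false);
  //   size_t bytes = 0;
  //   plan->get_workspace_size(&bytes);
  //   void *ws = sycl::malloc_device(bytes, q);
  //   plan->set_workspace(ws);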
private:
static std::pair<library_data_t, library_data_t> fft_type_to_data_type(
fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
      default:
        throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
                              "invalid fft type");
    }
  }
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n) distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n) distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
    if (_q->get_device().is_gpu()) {                                           \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t>
void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
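  // Note (added explanation): the strides and distances above implement the
  // standard packed layout for real FFTs: a length-n real transform yields
  // n/2 + 1 complex outputs, and the in-place case pads each real row to
  // 2 * (n/2 + 1) scalars so the complex result fits in the same buffer.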
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The complex domain descriptor needs different config values if the
// FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The real domain descriptor needs different config values if the
// FFT placement is different.
// Here we check the condition, and new config values are set and
// re-committed if needed.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic) set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
template <typename T>
inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
} // namespace detail
enum class version_field : int { major, minor, update, patch };
/// Returns the requested field of Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
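// Example (added illustration, hypothetical values):
//   int major = 0;
//   dpct::mkl_get_version(dpct::version_field::major, &major);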
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
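// Example (added illustration): each library_data_t occupies one byte of the
// id, with the first argument in the lowest byte, so
//   get_type_combination_id(library_data_t::real_int32,
//                           library_data_t::real_float)
// evaluates to ((std::uint64_t)real_float << 8) | (std::uint64_t)real_int32
// and can be matched against the same constexpr expression in a switch.

// Note (added clarification): the entries below are bit widths, not byte
// counts (for example, 8 * sizeof(float) == 32).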
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "lib_common_utils.hpp"
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(),
*sparse_matrix_handle);
oneapi::mkl::sparse::trmv(
queue, info->get_uplo(), trans, info->get_diag(), alpha_value,
*sparse_matrix_handle, data_x, beta_value, data_y);
break;
}
default:
throw std::runtime_error(
"the spmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
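// Usage sketch (added illustration): y = A * x for a general CSR matrix.
// `q`, `rows`, `cols`, `a_val`, `a_row_ptr`, `a_col_ind`, `x`, and `y` are
// hypothetical.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, rows, cols,
//                       &alpha, info, a_val, a_row_ptr, a_col_ind, x, &beta,
//                       y);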
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Stores the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows and columns of the square sparse
/// matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse
/// matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr) return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
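// Illustrative sketch (assumed usage): pre-optimizing a triangular solve.
// `q`, `n`, `val`, `row_ptr`, and `col_ind` are hypothetical.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
//   auto opt = std::make_shared<dpct::sparse::optimize_info>();
//   dpct::sparse::optimize_csrsv(q, oneapi::mkl::transpose::nontrans, n,
//                                info, val, row_ptr, col_ind, opt);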
#endif
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector.
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix.
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num),
_col_num(col_num),
_leading_dim(leading_dim),
_value(value),
_value_type(value_type),
_layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix.
class sparse_matrix_desc {
public:
/// Constructor
/// \param [out] desc The descriptor to be created
/// \param [in] row_num Number of rows of the sparse matrix.
/// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse
/// matrix.
/// \param [in] row_ptr_type Data type of the \p row_ptr .
/// \param [in] col_ind_type Data type of the \p col_ind .
/// \param [in] base Indicates how input arrays are indexed.
/// \param [in] value_type Data type of the \p value .
/// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num),
_col_num(col_num),
_nnz(nnz),
_row_ptr(row_ptr),
_col_ind(col_ind),
_value(value),
_row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type),
_base(base),
_value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destroy method.
/// \param [in] e The event which the destroy method depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
  /// \param [out] value An array containing the non-zero elements of the
  /// sparse matrix.
  /// \param [out] row_ptr_type Data type of the \p row_ptr .
  /// \param [out] col_ind_type Data type of the \p col_ind .
  /// \param [out] base Indicates how input arrays are indexed.
  /// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
/// \param [out] format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [out] attribute The attribute type
/// \param [out] data The attribute value
/// \param [out] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse
/// matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t>
void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x +
/// beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies the operation on the input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a, \p x and \p y .
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta,
y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta,
y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
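// Illustrative usage sketch for spmv (not part of the original header). The
// descriptors desc_a, desc_x and desc_y are assumed to have been created
// elsewhere; their constructors are not shown in this file, and
// library_data_t is assumed to live in namespace dpct, as the unqualified
// uses above suggest.
//
//   sycl::queue &q = dpct::get_default_queue(); // in-order under USM mode
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, desc_a,
//                      desc_x, &beta, desc_y,
//                      dpct::library_data_t::real_float);
//   q.wait(); // make sure y is ready before the host reads it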
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) +
/// beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies the operation on input matrix a.
/// \param [in] trans_b Specifies the operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a, \p b and \p c .
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
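// Illustrative usage sketch for spmm, under the same assumptions as the spmv
// sketch above. Note that b and c must use the same dense layout, otherwise
// spmm throws a std::runtime_error before dispatching.
//
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::spmm(q, oneapi::mkl::transpose::nontrans,
//                      oneapi::mkl::transpose::nontrans, &alpha, desc_a,
//                      desc_b, &beta, desc_c,
//                      dpct::library_data_t::real_float);
//   q.wait();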
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <sycl/sycl.hpp>
#include <thread>
#include <vector>
#if defined(__linux__)
#include <sys/syscall.h>
#include <unistd.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy the memory pointed to by \p event.
///
/// \param event Pointer to the sycl::event to be destroyed.
static void destroy_event(event_ptr event) { delete event; }
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
  /// Returns the maximum clock rate of the device's global memory in kHz. If
  /// the compiler does not support this API, the default value of 3200000 kHz
  /// is returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
  /// Returns the maximum bus width between the device and memory in bits. If
  /// the compiler does not support this API, the default value of 64 bits is
  /// returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char *name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void set_max_work_items_per_compute_unit(
int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void set_max_register_size_per_work_group(
int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) { _device_id = device_id; }
void set_uuid(std::array<unsigned char, 16> uuid) { _uuid = std::move(uuid); }
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
  /// Returns the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
  /// Get the number of bytes of free and total memory on the SYCL device.
  /// \param [out] free_memory The number of bytes of free memory on the SYCL
  /// device.
  /// \param [out] total_memory The number of bytes of total memory on the SYCL
  /// device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported."
<< std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported."
<< std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
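  // Illustrative usage sketch (a minimal example; dpct::get_current_device()
  // is declared later in this header):
  //
  //   size_t free_mem = 0, total_mem = 0;
  //   dpct::get_current_device().get_memory_info(free_mem, total_mem);
  //   std::cout << free_mem << " of " << total_mem << " bytes free\n";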
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION < 20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where
// max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated
// by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0) prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message( \
"get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning \
"get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
    // Estimated max register size per work group; update the value according
    // to the actual device properties if needed.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
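  // Illustrative usage sketch: take a snapshot of the device properties.
  // Here dev is any dpct::device_ext, e.g. dpct::get_current_device().
  //
  //   dpct::device_info prop = dev.get_device_info();
  //   std::cout << prop.get_name() << ": "
  //             << prop.get_max_compute_units() << " compute units, "
  //             << prop.get_global_mem_size() / (1 << 20) << " MiB global\n";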
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
    // Guard the destruction of current_queues so the shared_ptr reference
    // counts are updated under the lock.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(
bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(
std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue *q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
  /// The caller must hold \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
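    // For example, "OpenCL 3.0 NEO" parses to major = 3, minor = 0, and a
    // bare "1.2" parses to major = 1, minor = 2.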
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i])) break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.') break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
unsigned int dev_id = current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
auto it = _thread2dev_map.find(get_tid());
if (it != _thread2dev_map.end()) return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
_thread2dev_map[get_tid()] = id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for (auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device = sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu()) _cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
  /// thread id in _thread2dev_map, which means the default device should be
  /// used for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
/// Util function to get the default queue of the currently selected device,
/// depending on the USM config. Returns the default out-of-order queue when
/// USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// the dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in the dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of the current device in the dpct device
/// manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of current
/// device in dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev) {
return dev_mgr::instance().get_device_id(dev);
}
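// Illustrative usage sketch for the device-manager helpers above (a minimal
// example):
//
//   dpct::select_device(0);                     // bind device 0 to this thread
//   sycl::queue &q = dpct::get_default_queue(); // its default queue
//   dpct::device_ext &dev = dpct::get_current_device();
//   unsigned int id = dpct::get_device_id(dev); // round-trips to 0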
/// Util function to check whether a device supports the given sycl::aspect
/// capabilities.
inline void has_capability_or_fail(
const sycl::device &dev, const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it)) continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
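// Illustrative usage sketch: abort early when required aspects are missing.
//
//   sycl::device dev(sycl::default_selector_v);
//   dpct::has_capability_or_fail(dev,
//                                {sycl::aspect::fp64, sycl::aspect::fp16});
//   // Throws std::runtime_error naming the unsupported aspect, if any.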
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <cassert>
#include <cstdint>
#include <cstring>
#include <map>
#include <mutex>
#include <sycl/sycl.hpp>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include "device.hpp"
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
  void set_x(size_t x) { _x = x; }
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size) return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error(
"dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr) return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
  /// Check if the pointer represents a device pointer or not.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
  /// This padding may be set to a positive value to debug
  /// out-of-bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and pointer points to this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension>
class accessor;
template <memory_region Memory, class T = byte_t>
class memory_traits {
public:
static constexpr sycl::access::target target = sycl::access::target::device;
static constexpr sycl::access_mode mode = (Memory == constant)
? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
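// For example, requesting rows of x = 100 bytes yields
// pitch = PITCH_DEFAULT_ALIGN(100) = 128, since each row is padded up to the
// next multiple of 32 bytes.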
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr, int value,
size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
/// Set \p value to the 3D memory region pointed by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event> dpct_memset(sycl::queue &q,
pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, void *ptr,
size_t pitch, int val,
size_t x, size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
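// For example, dpct_memset(q, ptr, pitch, 0, x, y) zero-fills an x-by-y byte
// matrix whose rows start pitch bytes apart, submitting one fill per row via
// the 3D overload above.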
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction direction_table
[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] = {
{memcpy_direction::host_to_host, memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q,
from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size) return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Get the actual copy range and make sure it does not exceed the
// underlying allocation.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range,
sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // events the free operation depends on
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // The padding may hold meaningful data, so prefill the temp
          // buffer with the current destination contents.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), to_surface,
buf.get_size(), device_to_host,
dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(), device_to_host,
dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)),
from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size, [=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size, [=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from,
sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id,
from_id, size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event> dpct_memcpy(
sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U>
struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const {
return _impl == other._impl;
}
bool operator!=(const usm_allocator &other) const {
return _impl != other._impl;
}
};
} // namespace deprecated
inline void dpct_free(void *ptr, const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check if the pointer \p ptr represents a device pointer or not.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template <class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T>
static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr) return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
/// Get the accessor for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode> get_access(const void *ptr,
sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
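// Illustrative usage sketch (USM-none mode, a minimal example): access the
// memory behind a virtual pointer inside a command group. The pointer d_ptr
// is assumed to come from dpct_malloc.
//
//   q.submit([&](sycl::handler &cgh) {
//     dpct::access_wrapper<float *> w(d_ptr, cgh);
//     cgh.single_task([=] {
//       float *p = w.get_raw_pointer(); // device pointer, offset applied
//       p[0] = 1.0f;
//     });
//   });
//
// get_access(ptr, cgh) instead returns the raw byte accessor over the whole
// underlying buffer; pair it with get_buffer_and_offset(ptr) to locate the
// bytes that ptr refers to.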
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
/// Get the host pointer from a buffer that is mapped to the virtual pointer
/// \p ptr.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T>
static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr = BufferOffset.first.get_host_access().get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data dpct_malloc(sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the device memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed to by a batch of pointers in \p pointers,
/// which are related to \p q, after \p events have completed.
///
/// \param pointers The pointers to the device memory requested to be freed.
/// \param events The events to be waited on.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction; it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
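// A minimal round-trip sketch (hypothetical host code): allocate device
// memory, copy host data in, copy it back, and free the allocation.
//
//   std::vector<float> in(n, 1.0f), out(n);
//   void *dev = dpct::dpct_malloc(n * sizeof(float));
//   dpct::dpct_memcpy(dev, in.data(), n * sizeof(float), dpct::host_to_device);
//   dpct::dpct_memcpy(out.data(), dev, n * sizeof(float), dpct::device_to_host);
//   dpct::dpct_free(dev);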
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction; it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction; it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
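// A minimal 2D-copy sketch (hypothetical): copy a w x h float matrix from
// tightly packed host memory into pitched device memory. The x extent is
// given in bytes, the y extent in rows.
//
//   size_t pitch;  // set by dpct_malloc to the aligned row size in bytes
//   void *dev = dpct::dpct_malloc(pitch, w * sizeof(float), h);
//   dpct::dpct_memcpy(dev, pitch,                     // destination + pitch
//                     host.data(), w * sizeof(float), // source + pitch
//                     w * sizeof(float), h,           // x (bytes), y (rows)
//                     dpct::host_to_device);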
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction; it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void async_dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of the 3D matrix specified by \p from to the
/// 3D matrix specified by \p to. The source and destination positions are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size. The value of \p direction is used to set the copy direction;
/// it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
/// Asynchronously copies a subset of the 3D matrix specified by \p from to the
/// 3D matrix specified by \p to. The source and destination positions are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size. The value of \p direction is used to set the copy direction;
/// it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the size of the 2D region to set. \p pitch is the number of
/// bytes in the linear dimension, including padding bytes. The function will
/// return after the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in the linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y, sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the size of the 2D region to set. \p pitch is the number of
/// bytes in the linear dimension, including padding bytes. The return of the
/// function does NOT guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in the linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the size of the 3D region to set. The function will return after
/// the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val, sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the size of the 3D region to set. The return of the function does
/// NOT guarantee the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as a device function parameter.
template <class T, memory_region Memory, size_t Dimension>
class accessor;
template <class T, memory_region Memory>
class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory>
class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)),
_range(range_in),
_reference(false),
_host_ptr(nullptr),
_device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
    // Make sure that the singleton classes mem_mgr and dev_mgr are destructed
    // later than this object.
detail::mem_mgr::instance();
dev_mgr::instance();
}
  /// Variadic constructor that forwards its arguments to the range constructor.
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference) dpct::dpct_free(_device_ptr);
if (_host_ptr) std::free(_host_ptr);
}
  /// Allocate memory with the default queue, and initialize the memory if it
  /// has an initial value.
void init() { init(dpct::get_default_queue()); }
  /// Allocate memory with the specified queue, and initialize the memory if it
  /// has an initial value.
void init(sycl::queue &q) {
if (_device_ptr) return;
if (!_size) return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
  /// Assign this variable to an existing device pointer; the pointer is
  /// treated as a reference and is not freed by the destructor.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
  /// Get the memory pointer of the memory object, which is a virtual pointer
  /// when USM is not used and a device pointer when USM is used.
value_t *get_ptr() { return get_ptr(get_default_queue()); }
  /// Get the memory pointer of the memory object, which is a virtual pointer
  /// when USM is not used and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type get_access(
sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
  device_memory(value_t *memory_ptr, size_t size)
      : _size(size),
        _range(size / sizeof(T)),
        _reference(true),
        _host_ptr(nullptr),  // must be initialized; the destructor reads it
        _device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(_size, q.get_device(),
q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
} // namespace detail
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
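// A minimal usage sketch (hypothetical): a 1-D constant-memory array with an
// initial value. In buffer mode (DPCT_USM_LEVEL_NONE) a kernel reads it via
// get_access(); in USM mode the raw device pointer from get_ptr() is passed
// to the kernel instead.
//
//   static dpct::constant_memory<float, 1> coeffs(sycl::range<1>(4),
//                                                 {0.1f, 0.2f, 0.3f, 0.4f});
//   // USM mode:
//   float *dev_coeffs = coeffs.get_ptr();  // allocates and copies on first use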
// dpct::deprecated:: is for functionality that was introduced for
// compatibility purposes but relies on deprecated C++ features, which have
// either been removed or will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator =
detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator =
detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
class pointer_attributes {
public:
void init(const void *ptr, sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type != sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type != sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device)
? ptr
: nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() { return memory_type; }
const void *get_device_pointer() { return device_pointer; }
const void *get_host_pointer() { return host_pointer; }
bool is_memory_shared() { return memory_type == sycl::usm::alloc::shared; }
unsigned int get_device_id() { return device_id; }
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
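// A minimal usage sketch (hypothetical; USM mode only, since init() throws
// when DPCT_USM_LEVEL_NONE is defined):
//
//   sycl::queue &q = dpct::get_default_queue();
//   float *p = sycl::malloc_shared<float>(16, q);
//   dpct::pointer_attributes attrs;
//   attrs.init(p, q);
//   assert(attrs.is_memory_shared());
//   assert(attrs.get_host_pointer() == p);
//   sycl::free(p, q);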
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include "dpl_extras/algorithm.h"
#include "dpl_extras/dpcpp_extensions.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/memory.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/vector.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T>
bool isnan(const T a) {
return sycl::isnan(a);
}
// TODO: Need to add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i) f += a[i] * a[i];
return sycl::sqrt(f);
}
}
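// A minimal usage sketch (hypothetical): for len <= 4 the call maps onto the
// corresponding sycl::fast_length overload.
//
//   float v[3] = {3.0f, 0.0f, 4.0f};
//   float l = dpct::fast_length(v, 3);  // ~5.0f, within fast-math tolerance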
/// Compute the Euclidean length (2-norm) of the input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T>
inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i) ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool> compare_both(
const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool> unordered_compare_both(
const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs a 2-element comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T> compare(
const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element comparison; the compare result of each element is 0
/// (false) or 0xffff (true). Returns an unsigned int composed of the compare
/// results of the two elements.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Performs a 2-element unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T> unordered_compare(
const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element unordered comparison; the compare result of each
/// element is 0 (false) or 0xffff (true). Returns an unsigned int composed of
/// the compare results of the two elements.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, element-wise, whether a 2-element value is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
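// A minimal usage sketch (hypothetical): mixed signed/unsigned arguments are
// resolved by converting the signed value to the unsigned type, mirroring the
// usual C integer conversions.
//
//   std::uint32_t u = 5u;
//   std::int32_t s = -1;       // converts to 0xFFFFFFFF
//   auto m = dpct::min(u, s);  // m == 5u, of type std::uint32_t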
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
/// Performs ReLU saturation (negative values are clamped to zero).
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T>
inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f) return 0.f;
return a;
}
template <class T>
inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs a complex number multiply-add.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
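// A minimal usage sketch (hypothetical): complex values are packed as
// sycl::vec<T, 2> with the real part in lane 0 and the imaginary part in
// lane 1. (1+2i)*(3+4i) + (5+6i) == 0+16i:
//
//   sycl::float2 a{1.f, 2.f}, b{3.f, 4.f}, c{5.f, 6.f};
//   sycl::float2 r = dpct::complex_mul_add(a, b, c);  // {0.f, 16.f}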
/// Compares two values and returns the larger one. If either of the inputs
/// is NaN, returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T>
inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b)) return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either of the inputs
/// is NaN, returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T>
inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b)) return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
/// A sycl::abs wrapper functor.
struct abs {
template <typename T>
auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T>
auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
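// A minimal usage sketch (hypothetical): a saturating add of four packed
// unsigned bytes, using the add_sat wrapper functor defined above. Each byte
// lane is processed independently.
//
//   unsigned a = 0x01FF0102, b = 0x01010101;
//   unsigned r = dpct::vectorized_binary<sycl::uchar4>(a, b, dpct::add_sat());
//   // Per byte: 0x01+0x01=0x02, 0xFF+0x01 saturates to 0xFF -> r == 0x02FF0203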
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater-than comparison of the two values
template <typename S, typename T>
inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T>
inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T>
inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the absolute differences of two values without modulo
/// overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the element-wise absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
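// A minimal usage sketch (hypothetical): a sum-of-absolute-differences (SAD)
// over four packed unsigned bytes, a common building block in video motion
// estimation.
//
//   unsigned a = 0x00010203, b = 0x03020100;
//   unsigned sad = dpct::vectorized_sum_abs_diff<sycl::uchar4>(a, b);
//   // |0x00-0x03| + |0x01-0x02| + |0x02-0x01| + |0x03-0x00| == 8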
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/blas_utils.hpp | //==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include <thread>
#include <utility>
#include <vector>
#include "lib_common_utils.hpp"
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
/// Get the value of \p s.
/// Copies the data to the host synchronously, then returns it.
/// \param [in] s Pointer to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array) sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template <typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced) _temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced) return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy,
res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy,
res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy,
res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
std::uint64_t key =
detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(q, n, reinterpret_cast<const float *>(x),
incx, reinterpret_cast<const float *>(y),
incy, reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val, data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val, data_x, incx, data_y,
incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx, data_y, incy,
c_value, s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(q, a_trans, b_trans, m, n, k,
alpha_value, data_a, lda, data_b, ldb,
beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, stride_a, data_b,
ldb, stride_b, beta_value, data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
  // For a symmetric matrix, this function performs:
  //   C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs:
  //   C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs:
  //   C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be updated before we call gemmt().
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> &&
trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a conjugate operation,
    // but only nontrans, conjtrans and trans are available.
    // So we do a conjtrans operation first, then a trans operation.
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer =
sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, new_B_buffer,
origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, beta_value,
data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(q, uplo, trans_A, trans_B, n, k,
alpha_value, data_a, lda, data_b,
ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, int lda,
void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info),
uplo_info(uplo_info),
transpose_info(transpose_info),
diag_info(diag_info),
value_info(value_info),
groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
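/// Converts an integer flag to the corresponding oneapi::mkl::transpose value
/// (0: nontrans, 1: trans, any other value: conjtrans).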
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by the lower triangular factors (with unit diagonal elements)
/// and the upper triangular factors.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, the non-pivoting LU factorization is computed instead.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda,
int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size,
scratchpad, scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
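// Illustrative usage sketch (hypothetical caller code, not part of this
// header; assumes a USM-capable device and shared allocations so both the
// host and the wrapper's internal kernels can touch the data):
//
//   sycl::queue q;
//   constexpr int n = 4, lda = 4, batch = 2;
//   float *mats[batch];
//   for (int i = 0; i < batch; ++i)
//     mats[i] = sycl::malloc_shared<float>(lda * n, q); // fill column-major
//   int *ipiv = sycl::malloc_shared<int>(batch * n, q); // n pivots per matrix
//   int *info = sycl::malloc_shared<int>(batch, q);     // per-matrix status
//   dpct::getrf_batch_wrapper(q, n, mats, lda, ipiv, info, batch);
//   q.wait(); // the LU factors now overwrite mats[i]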
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer,
ldb, stride_b, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue
.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
})
.wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared,
b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
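// Illustrative usage sketch (hypothetical caller code; continues the
// getrf_batch_wrapper sketch above and reuses q, mats, ipiv, n, lda, batch):
//
//   constexpr int nrhs = 1, ldb = 4;
//   float *rhs[batch];
//   for (int i = 0; i < batch; ++i)
//     rhs[i] = sycl::malloc_shared<float>(ldb * nrhs, q); // right-hand sides
//   int info_rs = 0;
//   dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//                             (const float **)mats, lda, ipiv, rhs, ldb,
//                             &info_rs, batch);
//   q.wait(); // rhs[i] now holds the solution X_i of A_i * X_i = B_i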
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n, const T *a[],
int lda, int *ipiv, T *b[], int ldb, int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b,
ipiv_buf, stride_ipiv, batch_size,
scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared,
b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
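// Illustrative usage sketch (hypothetical caller code; continues the
// getrf_batch_wrapper sketch above, writing the inverses to new storage so
// the factored inputs stay unchanged):
//
//   float *inv[batch];
//   for (int i = 0; i < batch; ++i)
//     inv[i] = sycl::malloc_shared<float>(lda * n, q);
//   dpct::getri_batch_wrapper(q, n, (const float **)mats, lda, ipiv, inv,
//                             lda, info, batch);
//   q.wait(); // inv[i] now holds the inverse of the i-th original matrix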
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalar factors of the elementary
/// reflectors.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue &exec_queue, int m, int n, T *a[],
int lda, T *tau[], int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size =
oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size,
scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array) free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
(Ty **)tau_shared, 1, &group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
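// Illustrative usage sketch (hypothetical caller code; each tau[i] needs
// min(m, n) entries):
//
//   sycl::queue q;
//   constexpr int m = 4, n = 3, lda = 4, batch = 2;
//   float *qr[batch], *tau[batch];
//   for (int i = 0; i < batch; ++i) {
//     qr[i]  = sycl::malloc_shared<float>(lda * n, q); // fill column-major
//     tau[i] = sycl::malloc_shared<float>(std::min(m, n), q);
//   }
//   int info_qr = 0;
//   dpct::geqrf_batch_wrapper(q, m, n, qr, lda, tau, &info_qr, batch);
//   q.wait(); // qr[i]: R on/above the diagonal, Householder vectors below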
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(q, n, x, incx, result);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
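// Illustrative usage sketch (hypothetical caller code; the result is written
// through the given pointer, so a shared allocation keeps it host-visible):
//
//   sycl::queue q;
//   float *x = sycl::malloc_shared<float>(1024, q); // fill with data
//   float *res = sycl::malloc_shared<float>(1, q);
//   dpct::nrm2(q, 1024, x, dpct::library_data_t::real_float, 1,
//              res, dpct::library_data_t::real_float);
//   q.wait(); // *res == Euclidean norm of x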
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
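// Illustrative usage sketch (hypothetical caller code; reuses q from the
// nrm2 sketch above. dot and dotc share a signature; dotc additionally
// conjugates x, which only matters for the complex variants):
//
//   using cfloat = std::complex<float>;
//   cfloat *cx = sycl::malloc_shared<cfloat>(256, q);
//   cfloat *cy = sycl::malloc_shared<cfloat>(256, q);
//   cfloat *cr = sycl::malloc_shared<cfloat>(1, q);
//   dpct::dotc(q, 256, cx, dpct::library_data_t::complex_float, 1,
//              cy, dpct::library_data_t::complex_float, 1,
//              cr, dpct::library_data_t::complex_float);
//   q.wait(); // *cr == sum over i of conj(cx[i]) * cy[i]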
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
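// Illustrative usage sketch (hypothetical caller code; reuses q and the
// float vector x from the nrm2 sketch above. Note that the real_half case
// reads alpha through a float pointer):
//
//   float alpha = 2.0f;
//   dpct::scal(q, 1024, &alpha, dpct::library_data_t::real_float,
//              x, dpct::library_data_t::real_float, 1);
//   q.wait(); // every x[i] scaled by 2.0f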
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x,
library_data_t x_type, int incx, void *y,
library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y,
incy);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
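// Illustrative usage sketch (hypothetical caller code; reuses q and x from
// the sketches above):
//
//   float *y = sycl::malloc_shared<float>(1024, q);
//   float a_val = 0.5f;
//   dpct::axpy(q, 1024, &a_val, dpct::library_data_t::real_float,
//              x, dpct::library_data_t::real_float, 1,
//              y, dpct::library_data_t::real_float, 1);
//   q.wait(); // y[i] += 0.5f * x[i]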
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor (typically the cosine of the rotation angle).
/// \param [in] s Scaling factor (typically the sine of the rotation angle).
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type, int incx,
void *y, library_data_t y_type, int incy, const void *c,
const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(
q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(
q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y,
incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
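// Illustrative usage sketch (hypothetical caller code; reuses q, x, and y
// from the sketches above. c and s are host-side scalars describing the
// rotation):
//
//   float c_val = 0.8f, s_val = 0.6f; // cos/sin of the rotation angle
//   dpct::rot(q, 1024, x, dpct::library_data_t::real_float, 1,
//             y, dpct::library_data_t::real_float, 1,
//             &c_val, &s_val, dpct::library_data_t::real_float);
//   q.wait(); // each (x[i], y[i]) pair rotated in the plane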
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half,
c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb,
&beta_float, c, ldc);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
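// Illustrative usage sketch (hypothetical caller code; column-major
// C = alpha * op(A) * op(B) + beta * C in single precision):
//
//   sycl::queue q;
//   constexpr int M = 64, N = 32, K = 48;
//   float *A = sycl::malloc_device<float>(M * K, q);
//   float *B = sycl::malloc_device<float>(K * N, q);
//   float *C = sycl::malloc_device<float>(M * N, q);
//   float one = 1.0f, zero = 0.0f;
//   dpct::gemm(q, oneapi::mkl::transpose::nontrans,
//              oneapi::mkl::transpose::nontrans, M, N, K, &one,
//              A, dpct::library_data_t::real_float, M,
//              B, dpct::library_data_t::real_float, K, &zero,
//              C, dpct::library_data_t::real_float, M,
//              dpct::library_data_t::real_float);
//   q.wait();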
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations
/// to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
float, float>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb,
&beta_float, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half,
c, ldc, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
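// Illustrative usage sketch (hypothetical caller code; this overload is
// USM-only, and a/b/c are host arrays of device pointers. Reuses q, M, N, K,
// one, and zero from the gemm sketch above):
//
//   constexpr int batch = 4;
//   const void *As[batch], *Bs[batch];
//   void *Cs[batch];
//   // ... point each entry at a device allocation of the right shape ...
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, M, N, K, &one,
//                    As, dpct::library_data_t::real_float, M,
//                    Bs, dpct::library_data_t::real_float, K, &zero,
//                    Cs, dpct::library_data_t::real_float, M,
//                    batch, dpct::library_data_t::real_float);
//   q.wait();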
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the
/// matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of
/// the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the
/// number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations
/// to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
float, float>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb,
stride_b, beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb,
stride_b, &beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
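// Illustrative usage sketch (hypothetical caller code; the strided variant
// packs each operand's matrices into one contiguous allocation with a fixed
// stride between consecutive matrices. Reuses the names from the two
// sketches above):
//
//   float *Ab = sycl::malloc_device<float>(batch * M * K, q);
//   float *Bb = sycl::malloc_device<float>(batch * K * N, q);
//   float *Cb = sycl::malloc_device<float>(batch * M * N, q);
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, M, N, K, &one,
//                    Ab, dpct::library_data_t::real_float, M,
//                    (long long)M * K,
//                    Bb, dpct::library_data_t::real_float, K,
//                    (long long)K * N, &zero,
//                    Cb, dpct::library_data_t::real_float, M,
//                    (long long)M * N, batch,
//                    dpct::library_data_t::real_float);
//   q.wait();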
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower
/// triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of the matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b, ldb,
beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower
/// triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of the matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b, ldb,
beta, c, ldc);
}
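// Illustrative usage sketch (hypothetical caller code for herk; note that
// beta stays real even when the matrices are complex):
//
//   sycl::queue q;
//   using cfloat = std::complex<float>;
//   constexpr int Nh = 32, Kh = 16;
//   cfloat *CA = sycl::malloc_device<cfloat>(Nh * Kh, q);
//   cfloat *CB = sycl::malloc_device<cfloat>(Nh * Kh, q);
//   cfloat *CC = sycl::malloc_device<cfloat>(Nh * Nh, q);
//   cfloat calpha{1.0f, 0.0f};
//   float rbeta = 0.0f;
//   dpct::herk(q, oneapi::mkl::uplo::upper,
//              oneapi::mkl::transpose::nontrans, Nh, Kh,
//              &calpha, CA, Nh, CB, Nh, &rbeta, CC, Nh);
//   q.wait(); // only the upper triangle of CC is updated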
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
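// Illustrative usage sketch (hypothetical caller code; this overload is
// USM-only and solves op(A_i) * X_i = alpha * B_i in place, overwriting each
// B_i with the solution. Reuses q, M, N, one, and batch from the gemm
// sketches above):
//
//   const void *Aarr[batch];
//   void *Barr[batch];
//   // ... point entries at device-resident triangular A_i and RHS B_i ...
//   dpct::trsm_batch(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//                    oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::diag::nonunit, M, N, &one,
//                    Aarr, dpct::library_data_t::real_float, M,
//                    Barr, dpct::library_data_t::real_float, M,
//                    batch, dpct::library_data_t::real_float);
//   q.wait();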
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of the matrix A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [out] c Output matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
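// Illustrative usage sketch (hypothetical caller code; because b is first
// copied into c when they differ, the input B is left untouched. Reuses q,
// M, N, and one from the gemm sketch above):
//
//   float *TA = sycl::malloc_device<float>(M * M, q); // triangular matrix A
//   float *TB = sycl::malloc_device<float>(M * N, q);
//   float *TC = sycl::malloc_device<float>(M * N, q);
//   dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::upper,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              M, N, &one, TA, M, TB, M, TC, M);
//   q.wait(); // TC = one * op(TA) * TB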
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at
/// the addr and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at
/// the addr and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at
/// the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value
/// stored in \p addr is equal to zero or greater than \p operand; otherwise
/// decrement the value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm =
sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(
addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand)) break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
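// Example (hypothetical values, for illustration only): with *addr == 5 and
// operand == 3, old (5) is greater than operand, so *addr is set to 3 and 5
// is returned; with *addr == 2 and operand == 3, *addr is decremented to 1
// and 2 is returned. This matches the semantics of CUDA's atomicDec.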
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise set the value stored in \p addr
/// to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm =
sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(
addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0)) break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
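// Example (hypothetical values, for illustration only): with *addr == 2 and
// operand == 4, *addr is incremented to 3 and 2 is returned; with *addr == 4
// (>= operand), *addr wraps to 0 and 4 is returned. This matches the
// semantics of CUDA's atomicInc.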
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise set the value stored in \p addr
/// to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand, sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false &&
"Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value
/// expected. Returns the value at the \p addr before the call.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is
/// expected.
/// \param success The memory ordering used when the comparison succeeds.
/// \param fail The memory ordering used when the comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value
/// expected. Returns the value at the \p addr before the call.
/// \param [in, out] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is
/// expected.
/// \param success The memory ordering used when the comparison succeeds.
/// \param fail The memory ordering used when the comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
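// Example (hypothetical usage sketch; `flag` is an illustrative name):
//   int *flag = ...;  // initially 0
//   int old = dpct::atomic_compare_exchange_strong(flag, 0, 1);
//   if (old == 0) {
//     // this thread observed 0 and installed 1; a racing thread instead
//     // sees the value it lost against
//   }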
/// Atomic extension to implement standard APIs in std::atomic
namespace detail {
template <typename T>
struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic {
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope,
Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope,
Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
  /// Constructor with an initial value.
  constexpr atomic(T d) noexcept : __d(d) {}
  /// atomically replaces the value of the referenced object with a non-atomic
  /// argument
  /// \param operand The value to replace the pointed value.
  /// \param memoryOrder The memory ordering used.
  /// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
  /// atomically replaces the value of the referenced object and obtains the
  /// value held previously
  /// \param operand The value to replace the pointed value.
  /// \param memoryOrder The memory ordering used.
  /// \param memoryScope The memory scope used.
  /// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic
/// argument and performs atomic exchange if equal or atomic load if not
  /// \param expected The value expected to be found in the object referenced by
  /// the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as
  /// expected
  /// \param success The memory model for the read-modify-write operation
  /// \param failure The memory model for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_weak(
T &expected, T desired, sycl::memory_order success,
sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure,
memoryScope);
}
  /// \param expected The value expected to be found in the object referenced by
  /// the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as
  /// expected
  /// \param memoryOrder The memory synchronization ordering for operations
  /// \param memoryScope The memory scope used.
  /// \returns true if the referenced object was successfully changed, false
  /// otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder,
memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic
/// argument and performs atomic exchange if equal or atomic load if not
  /// \param expected The value expected to be found in the object referenced by
  /// the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as
  /// expected
  /// \param success The memory model for the read-modify-write operation
  /// \param failure The memory model for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false
/// otherwise.
bool compare_exchange_strong(
T &expected, T desired, sycl::memory_order success,
sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure,
memoryScope);
}
  /// \param expected The value expected to be found in the object referenced by
  /// the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as
  /// expected
  /// \param memoryOrder The memory synchronization ordering for operations
  /// \param memoryScope The memory scope used.
  /// \returns true if the referenced object was successfully changed, false
  /// otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder,
memoryScope);
}
  /// atomically adds the argument to the value stored in the atomic object and
  /// obtains the value held previously
  /// \param operand The other argument of arithmetic addition
  /// \param memoryOrder The memory ordering used.
  /// \param memoryScope The memory scope used.
  /// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic
/// object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
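// Example (hypothetical usage sketch):
//   dpct::atomic<int> a(0);
//   a.store(5);
//   int prev = a.fetch_add(2);               // prev == 5, a.load() == 7
//   int expected = 7;
//   a.compare_exchange_strong(expected, 9);  // succeeds, a.load() == 9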
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1> or
/// oneapi::mkl::rng::device::mrg32k3a<4> or
/// oneapi::mkl::rng::device::philox4x32x10<1> or
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t>
class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements that need to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements that need to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \p distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
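// Example (hypothetical usage sketch inside a SYCL kernel; the seed, the skip
// offset, and `item` are illustrative):
//   dpct::rng::device::rng_generator<
//       oneapi::mkl::rng::device::philox4x32x10<4>>
//       gen(1234 /*seed*/, {item.get_global_linear_id() * 4});
//   auto v = gen.generate<oneapi::mkl::rng::device::uniform<float>, 4>();
//   // v is a sycl::vec<float, 4> of uniform random numbers in [0, 1)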
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  default:
    throw std::runtime_error(
        "The oneAPI Math Kernel Library (oneMKL) "
        "Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
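// Example (hypothetical usage sketch; `q`, `out`, and `n` are illustrative):
//   sycl::queue q;
//   std::int64_t n = 1000;
//   float *out = sycl::malloc_device<float>(n, q);
//   dpct::rng::host_rng_ptr gen = dpct::rng::create_host_rng(
//       dpct::rng::random_engine_type::philox4x32x10);
//   gen->set_queue(&q);
//   gen->set_seed(1234);
//   gen->generate_uniform(out, n);  // n uniform floats in [0, 1)
//   q.wait();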
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
#include <numeric>  // std::transform_reduce
#include <utility>  // std::forward
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
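// Example (hypothetical usage sketch; assumes an oneDPL execution policy such
// as oneapi::dpl::execution::seq is visible in the translation unit):
//   std::vector<float> a{1, 2, 3}, b{4, 5, 6};
//   float dot = dpct::inner_product(oneapi::dpl::execution::seq,
//                                   a.begin(), a.end(), b.begin(), 0.0f);
//   // dot == 32.0f (= 1*4 + 2*5 + 3*6)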
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using make_transform_output_iterator(). Used to apply the supplied
// transform function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc>
class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function
// may be applied on write, resulting in a transform_output_iterator
template <typename _UnaryFunc>
struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T>
auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp>
class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
// There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
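// Example (illustrative sketch):
//   auto cit = dpct::make_constant_iterator(5);
//   // cit[0] == 5, cit[100] == 5 -- every position yields the same value.
//   // Useful as an input sequence of identical values, e.g. as the second
//   // input range of a std::transform call.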
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp>
class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp>
struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp> operator()(
const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T>
struct __zip_iterator_impl;
template <class... Ts>
struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept a std::tuple type as its template
// argument for compatibility purposes. Please use oneapi::dpl::zip_iterator
// if you want to pass iterator types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator.
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
// signal to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator operator+(
difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iterator position, but
// with the index count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
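// Example (illustrative sketch):
//   int a[] = {8, 9, 10};
//   dpct::arg_index_input_iterator<int *> it(a);
//   auto kvp = *(it + 2);
//   // kvp.key == 2 (the index), kvp.value == 10 (the element at that index)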
template <typename IterT>
struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
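// Example (illustrative sketch; assumes 'ping' and 'pong' are two equally
// sized int buffers):
//   dpct::io_iterator_pair<int *> bufs(ping, pong);
//   // read a pass from bufs.first(), write its output to bufs.second(),
//   bufs.swap(); // then swap so the output becomes the next pass's input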
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(
std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type,
Pred>(p, new_value));
}
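// Example (illustrative sketch; 'policy' is any oneDPL execution policy,
// e.g. oneapi::dpl::execution::seq):
//   int data[] = {1, 2, 3};
//   int mask[] = {0, 1, 0};
//   dpct::replace_if(policy, data, data + 3, mask,
//                    [](int m) { return m != 0; }, 99);
//   // data == {1, 99, 3} -- replaced where the mask satisfies the predicate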
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(
std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type,
Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1> remove_if(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
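// Example (illustrative sketch; 'policy' is any oneDPL execution policy):
//   int keys[] = {1, 1, 2, 2, 3};
//   int vals[] = {10, 11, 12, 13, 14};
//   auto ends = dpct::unique(policy, keys, keys + 5, vals);
//   // keeps the first value of each run of equal keys:
//   // keys begins with {1, 2, 3}, vals with {10, 12, 14};
//   // ends.first == keys + 3, ends.second == vals + 3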
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
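// Example (illustrative sketch; 'policy' is any oneDPL execution policy):
//   int in[]   = {1, 2, 3, 4};
//   int mask[] = {1, 0, 1, 0};
//   int out[2];
//   dpct::copy_if(policy, in, in + 4, mask, out,
//                 [](int m) { return m != 0; });
//   // out == {1, 3} -- elements whose mask satisfies the predicate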
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
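// Example (illustrative sketch; 'policy' is any oneDPL execution policy):
//   int map[] = {2, 0, 1};
//   int in[]  = {10, 20, 30};
//   int out[3];
//   dpct::gather(policy, map, map + 3, in, out);
//   // out == {30, 10, 20}, i.e. out[i] = in[map[i]]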
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(
policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(
policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
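// Example (illustrative sketch; merges two sorted key ranges together with
// their associated values; 'policy' is any oneDPL execution policy):
//   int ka[] = {1, 3}, kb[] = {2, 4};
//   int va[] = {10, 30}, vb[] = {20, 40};
//   int kr[4], vr[4];
//   dpct::merge(policy, ka, ka + 2, kb, kb + 2, va, vb, kr, vr);
//   // kr == {1, 2, 3, 4}, vr == {10, 20, 30, 40}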
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
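// Example (illustrative sketch; 'policy' is any oneDPL execution policy):
//   int v[4];
//   dpct::iota(policy, v, v + 4, 10, 2); // v == {10, 12, 14, 16}
//   dpct::iota(policy, v, v + 4);        // v == {0, 1, 2, 3}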
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
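// Example (illustrative sketch; sorts values along with their keys;
// 'policy' is any oneDPL execution policy):
//   int keys[] = {3, 1, 2};
//   int vals[] = {30, 10, 20};
//   dpct::sort(policy, keys, keys + 3, vals);
//   // keys == {1, 2, 3}, vals == {10, 20, 30}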
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result,
Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6> set_symmetric_difference(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_symmetric_difference(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(
Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1> stable_partition(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1> partition(
Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
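// Example (illustrative sketch; stable partition of data by a mask;
// 'policy' is any oneDPL execution policy):
//   int data[] = {1, 2, 3, 4};
//   int mask[] = {0, 1, 0, 1};
//   auto mid = dpct::partition(policy, data, data + 4, mask,
//                              [](int m) { return m != 0; });
//   // data == {2, 4, 1, 3} (order within each group preserved),
//   // mid == data + 2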
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
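// Example (illustrative sketch; 'policy' is any oneDPL execution policy and
// d_in/d_out are equally sized key buffers of length n):
//   dpct::sort_keys(policy, d_in, d_out, n);               // ascending
//   dpct::sort_keys(policy, d_in, d_out, n, true);         // descending
//   dpct::sort_keys(policy, d_in, d_out, n, false, 0, 16); // by bits [0, 16)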
namespace internal {
// Transforms keys to a specific bit range and sorts by the transformed keys
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
// Using a comparison operator other than plain std::greater() or std::less()
// prevents the radix sort path from being used, which costs some performance.
// However, this is necessary to transform the key to the desired bit range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
// A comparison operator other than std::greater() ensures stability of
// -0.0 and 0.0, at the cost of some performance because radix sort will
// not be used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
// A comparison operator other than std::less() ensures stability of -0.0
// and 0.0, at the cost of some performance because radix sort will not
// be used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms the key of each pair to a specific bit range and sorts the
// pairs by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
// Using a comparison operator other than plain std::greater() or std::less()
// prevents the radix sort path from being used, which costs some performance.
// However, this is necessary to transform the key to the desired bit range
// and also to select the key from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
  // Compare through the transformed key type to avoid a lossy conversion
  // back to the original key type.
  if (descending)
    partial_sort_with_comp(::std::greater<transform_key_t>());
  else
    partial_sort_with_comp(::std::less<transform_key_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents oneDPL from dispatching to radix sort, which costs
  // some performance. However, this is necessary to select the key from the
  // zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
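// Hedged illustration (not part of the original header): reproduces the
// byte-width dispatch above. A clipped bit range of 13 bits, e.g.
// begin_bit = 4 and end_bit = 17, yields num_bytes = (13 - 1) / 8 + 1 = 2,
// so the keys are compared through a uint16_t transformed key.
namespace example {
constexpr int transformed_key_bytes(int begin_bit, int end_bit) {
  return (end_bit - begin_bit - 1) / 8 + 1;
}
static_assert(transformed_key_bytes(4, 17) == 2,
              "a 13-bit range maps to a 16-bit transformed key");
} // namespace example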
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer whose contents are discarded. The
  // memory footprint could be improved by a specialized iterator with a
  // single, unchanging dummy key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void mark_segments(_ExecutionPolicy &&policy,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n,
int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size is larger than a work-group, use the
    // work-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size is larger than half a sub-group, use a
    // sub-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to a sub-group, use a
    // single work-item to mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
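// Hedged host-side reference (illustrative only; assumes <vector> is
// available and host-accessible offset iterators): computes the same
// per-element segment ids as mark_segments above, e.g. for validating the
// three device kernels. With begin_offsets = {0, 3}, end_offsets = {3, 6}
// and n = 6 it produces {0, 0, 0, 1, 1, 1}.
template <typename OffsetIteratorT>
inline ::std::vector<::std::size_t>
mark_segments_host_reference(OffsetIteratorT begin_offsets,
                             OffsetIteratorT end_offsets, ::std::size_t n,
                             ::std::size_t nsegments) {
  ::std::vector<::std::size_t> segments(n, 0);
  for (::std::size_t seg = 0; seg < nsegments; ++seg)
    for (::std::size_t i = begin_offsets[seg]; i < end_offsets[seg]; ++i)
      segments[i] = seg; // same write pattern as the single-work-item kernel
  return segments;
}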
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by keys, keeping track of which segment each key was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Stable-sort by segment id to regroup the segments while
  // preserving the sorted key order within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
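// Worked example of the two-sort trick above (illustrative): with
// keys = {3, 1, 2, 4} and two segments covering elements {0, 1} and {2, 3},
// mark_segments yields segment ids {0, 0, 1, 1}. Part 1 sorts by key:
// keys_temp = {1, 2, 3, 4}, segments_sorted = {0, 1, 0, 1}. Part 2 stably
// sorts by segment id, regrouping the segments while keeping each segment's
// keys in sorted order: keys_out = {1, 3, 2, 4}.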
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by keys, keeping track of which segment each key was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Stable-sort by segment id to regroup the segments while
  // preserving the sorted key order within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
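// Hedged usage sketch (illustrative, comment only; assumes a oneDPL device
// policy and a double-buffered io_iterator_pair as used above):
//
//   auto policy = oneapi::dpl::execution::make_device_policy(q);
//   // keys/values each hold two buffers; after the call with
//   // do_swap_iters = true, keys.first() and values.first() name the
//   // sorted output.
//   dpct::sort_pairs(policy, keys, values, n, /*descending=*/false,
//                    /*do_swap_iters=*/true);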
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters) keys.swap();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the target device's compute capacity.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) {
    // Use a host loop of device-parallel sorts when the total number of
    // sorts is small, to limit per-sort launch overhead.
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else {
    // Otherwise, a decent catch-all using two full device-wide sorts.
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
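// Hedged dispatch example for the heuristic above: on a GPU with 64 compute
// units and a sub-group size of 32, more than 64 * 32 = 2048 segments take
// the parallel_for-of-serial-sorts path, fewer than 512 segments take the
// host loop of device-parallel sorts, and anything in between uses the two
// full device-wide sorts.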
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the target device's compute capacity.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) {
    // Use a host loop of device-parallel sorts when the total number of
    // sorts is small, to limit per-sort launch overhead.
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else {
    // Otherwise, a decent catch-all using two full device-wide sorts.
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value,
StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
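// Hedged usage sketch (illustrative): on sorted input {1, 2, 2, 2, 5},
// equal_range(policy, first, first + 5, 2) returns {first + 1, first + 4},
// bounding the run of 2s, mirroring ::std::equal_range semantics via one
// lower_bound and one upper_bound call.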
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
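// Note on the two segmented argmin/argmax reductions above: an empty
// segment (end_offsets[i] <= begin_offsets[i]) produces a sentinel
// key_value_pair with index 1 and the value type's max() (argmin) or
// lowest() (argmax). This is assumed to mirror CUB's convention for
// zero-length segments in DeviceSegmentedReduce::ArgMin/ArgMax.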
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable {
using type = T;
};
template <>
struct make_allocatable<void> {
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T>
class device_pointer;
#endif
template <typename T>
struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T>
void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
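// Hedged usage sketch (illustrative, comment only): device_reference models
// a reference to one element of device-accessible storage, so the overload
// above swaps the referenced elements rather than the reference objects.
//
//   int a = 1, b = 2;                  // stand-ins for device elements
//   dpct::device_reference<int> ra(a), rb(b);
//   dpct::swap(ra, rb);                // now a == 2 and b == 1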
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType *)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // buffer has no default ctor; we pass a zero range to create an empty
  // buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
  // Needed for malloc_device; count is the number of bytes to allocate.
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
  // Needed for malloc_device; count is the number of bytes to allocate.
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T>
class device_iterator;
template <typename ValueType, typename Derived>
class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
  // Needed for malloc_device; count is the number of bytes to allocate.
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
  // Needed for malloc_device; count is the number of bytes to allocate.
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T>
class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
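// Hedged usage sketch (illustrative, comment only):
//
//   auto p = dpct::malloc_device<int>(n); // allocates n * sizeof(int) bytes
//   // ... use p as a random-access device pointer/iterator ...
//   dpct::free_device(p); // note: free_device below is a no-op placeholder
//                         // and performs no deallocation in this header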
template <typename T>
void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T>
device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer>
Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
template <typename T>
const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T>
T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T>
const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T>
T &get_raw_reference(T &ref) {
return ref;
}
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <algorithm>
#include <iterator>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include <vector>
#include "../device.hpp"
#include "memory.h"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA>
operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()),
_size(other.size()),
_capacity(other.capacity()),
_storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(
InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
      : _alloc(get_default_queue()),
        _size(v.size()),
        _capacity(v.capacity()),
        _storage(v.real_begin()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void assign(
InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0) --_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
    // overwrite (erase) the subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp,
tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void insert(
iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp,
tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
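// A minimal usage sketch (editor-added illustration; it assumes the
// constructors declared earlier in this class and a USM-capable default
// queue, and is not part of the original header):
//
//   std::vector<float> host(1024, 1.0f);
//   dpct::device_vector<float> dv;   // empty device vector
//   dv = host;                       // copy host data into device storage
//   dv.resize(dv.size() + 8, 0.0f);  // grow and fill the tail on the device
//   dv.push_back(3.0f);              // append a single element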
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA>
operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void assign(
InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0) --_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
    // overwrite (erase) the subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void insert(
iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <stdexcept>
#include <sycl/sycl.hpp>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args>
constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void exclusive_scan(const Item &item,
T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
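// A minimal usage sketch of the multi-value exclusive scan (editor-added
// illustration; `item` is the sycl::nd_item of an enclosing parallel_for and
// each work-item holds four values):
//
//   int in[4], out[4];
//   // ... load this work-item's values into `in` in blocked order ...
//   dpct::group::exclusive_scan(item, in, out, /*init=*/0, sycl::plus<int>());
//   // out[i] now holds the sum of init and every value that precedes in[i]
//   // in the flattened group order.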
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the inputs of the first i work-items, where the
/// calling work-item is the i-th work-item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T exclusive_scan(const Item &item, T input, T init,
BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group; it returns the initial value of the resulting scan of the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0>
struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT>
struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
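// Example (editor-added): bfe(0b10110100u, /*bit_start=*/2, /*num_bits=*/3)
// extracts bits [2, 5) and yields 0b101 (decimal 5).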
template <int RADIX_BITS, bool DESCENDING = false>
class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void rank_keys(const Item &item,
uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD],
int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void exclusive_downsweep(const Item &item,
packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U>
struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U>
struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U>
struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U>
struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T>
struct traits : base_traits<T, T> {};
template <>
struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <>
struct traits<int> : base_traits<int, uint32_t> {};
template <>
struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N>
struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements scatter to blocked exchange pattern used in radix sort algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD>
class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void scatter_to_blocked(Item item,
T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void sort(const Item &item, T (&keys)[VALUES_PER_THREAD],
int begin_bit = 0, int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit) break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
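// A minimal usage sketch (editor-added illustration; assumes a sycl::queue
// `q`, a group size of 128 work-items, and four 32-bit keys per work-item):
//
//   constexpr int GROUP = 128, VPT = 4;
//   using sorter = dpct::group::radix_sort<int, VPT>;
//   q.submit([&](sycl::handler &h) {
//     sycl::local_accessor<uint8_t, 1> scratch(
//         sorter::get_local_memory_size(GROUP), h);
//     h.parallel_for(sycl::nd_range<1>(GROUP, GROUP),
//                    [=](sycl::nd_item<1> item) {
//       int keys[VPT];
//       // ... load this work-item's keys in blocked order ...
//       sorter(&scratch[0]).sort(item, keys);
//       // keys[] now holds this work-item's slice of the sorted sequence.
//     });
//   });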
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T reduce(Item item, T (&inputs)[VALUES_PER_THREAD],
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
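// Usage sketch (editor-added illustration; inside an nd_range kernel, each
// work-item contributes two partial values and receives the group total):
//
//   float vals[2] = {partial_a, partial_b};  // hypothetical per-item inputs
//   float total = dpct::group::reduce(item, vals, sycl::plus<float>());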
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce number of work-items at the start of the sub-group
/// to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>,
T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
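// Usage sketch (editor-added illustration): reduce only the first eight
// work-items of the calling sub-group; the remaining lanes contribute the
// known identity of the operation (0 for sycl::plus<int>):
//
//   int sum = dpct::group::reduce_over_partial_group(
//       item, value, /*items_to_reduce=*/8, sycl::plus<int>());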
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. The inclusive scan of the input elements assigned to
/// work-items in the group is written to outputs.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void inclusive_scan(const Item &item,
T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD],
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
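// Usage sketch (editor-added illustration): single-value inclusive scan that
// also broadcasts the group-wide total to every work-item:
//
//   int agg;
//   int prefix =
//       dpct::group::inclusive_scan(item, value, sycl::plus<int>(), agg);
//   // `prefix` includes this work-item's own value; `agg` is the group sum.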
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group; it returns the initial value of the resulting scan of the
/// work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args>
constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
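// Usage sketch (editor-added illustration; the pointers are hypothetical USM
// allocations describing two segments, [0, 4) and [4, 7)):
//
//   sycl::queue q;
//   int *in = sycl::malloc_shared<int>(7, q);
//   int *out = sycl::malloc_shared<int>(2, q);
//   int *begins = sycl::malloc_shared<int>(2, q);
//   int *ends = sycl::malloc_shared<int>(2, q);
//   for (int i = 0; i < 7; ++i) in[i] = i + 1;     // {1, 2, ..., 7}
//   begins[0] = 0; begins[1] = 4; ends[0] = 4; ends[1] = 7;
//   dpct::device::segmented_reduce<128>(q, in, out, 2, begins, ends,
//                                       sycl::plus<int>(), 0);
//   q.wait();                                      // out == {10, 18}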
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts>
struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp>
struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp>
struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this
/// experimental feature supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/guided_OceanFFT_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T>
struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T>
struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp>
class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp>
struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with a raw memory buffer,
// not an initialized array, because element initialization/destruction
// alone would cost at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp>
class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
  // Return an iterator to the beginning of the buffer.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp>
class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName>
struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less>
struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda: [pred](Ref a) { return pred(get<1>(a)); }
template <typename Predicate>
struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1>
result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate>
struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1>
result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T>
struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T>
result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
//[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b)));
template <typename Predicate>
struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T>
result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate>
struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2>
T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<0>(t))) get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op)
: pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<1>(t))) get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t))) get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// This following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to fit
// into the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N>
struct uint_byte_map {};
template <>
struct uint_byte_map<1> {
using type = uint8_t;
};
template <>
struct uint_byte_map<2> {
using type = uint16_t;
};
template <>
struct uint_byte_map<4> {
using type = uint32_t;
};
template <>
struct uint_byte_map<8> {
using type = uint64_t;
};
template <typename T>
struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT>
class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
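// Usage sketch (editor-added illustration; translate_key is an internal
// utility, shown here mapping float keys to the 8 bits starting at bit 24 of
// their order-preserving unsigned image, e.g. for one radix pass):
//
//   dpct::internal::translate_key<float, uint8_t> to_bucket(/*begin_bit=*/24,
//                                                           /*end_bit=*/32);
//   uint8_t bucket = to_bucket(3.5f);  // monotone w.r.t. float ordering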
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/DiscreteCosineTransform/src/DCT.hpp | #pragma pack(push, 1)
// This data structure represents one pixel value in RGB format
typedef struct {
unsigned char blue;
unsigned char green;
unsigned char red;
} rgb;
// This block is only used when built for Structure of Arrays (SOA) with Array
// Notation
typedef struct {
unsigned char *blue;
unsigned char *green;
unsigned char *red;
} SOA_rgb;
#pragma pack(pop)
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SpectralMethods/DiscreteCosineTransform/src/DCT.cpp | #include "DCT.hpp"
#include <sycl/sycl.hpp>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include "dpc_common.hpp"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
using namespace dpc_common;
using namespace sycl;
#ifdef PERF_NUM
constexpr int num_tests = 5;
#endif
constexpr int block_dims = 8;
constexpr int block_size = 64;
// API for creating 8x8 DCT matrix
void CreateDCT(float matrix[block_size]) {
int temp[block_dims];
for (int i = 0; i < block_dims; ++i) temp[i] = i;
for (int i = 0; i < block_dims; ++i) {
for (int j = 0; j < block_dims; ++j) {
if (i == 0)
matrix[(i * block_dims) + j] = (1 / sycl::sqrt((float)block_dims));
else
matrix[(i * block_dims) + j] =
sycl::sqrt((float)2 / block_dims) *
sycl::cos(((((float)2 * temp[j]) + 1) * i * 3.14f) /
(2 * block_dims));
}
}
}
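// The matrix built above is the orthonormal DCT-II basis. In math notation
// (editor-added note):
//   T[0][j] = 1 / sqrt(8)
//   T[i][j] = sqrt(2.0 / 8) * cos(((2 * j + 1) * i * pi) / 16), for i > 0
// Note that the code approximates pi as 3.14f, so the transform is slightly
// inexact.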
// Transposes an 8x8 matrix x and writes output to xinv
void MatrixTranspose(float x[block_size], float xinv[block_size]) {
for (int i = 0; i < block_dims; ++i) {
for (int j = 0; j < block_dims; ++j)
xinv[(j * block_dims) + i] = x[(i * block_dims) + j];
}
}
// Multiply two matrices x and y and write output to xy
SYCL_EXTERNAL void MatrixMultiply(float x[block_size], float y[block_size],
float xy[block_size]) {
for (int i = 0; i < block_dims; ++i) {
for (int j = 0; j < block_dims; ++j) {
xy[(i * block_dims) + j] = 0;
for (int k = 0; k < block_dims; ++k)
xy[(i * block_dims) + j] +=
(x[(i * block_dims) + k] * y[(k * block_dims) + j]);
}
}
}
// Processes an individual 8x8 subset of image data
SYCL_EXTERNAL void ProcessBlock(rgb* indataset, rgb* outdataset,
float dct[block_size], float dctinv[block_size],
int start_index, int width) {
float interim[block_size], product[block_size], red_input[block_size],
blue_input[block_size], green_input[block_size], temp[block_size];
/*
// Quantization matrix which does 50% quantization
float quant[64] = {16, 11, 10, 16, 24, 40, 51, 61,
12, 12, 14, 19, 26, 58, 60, 55,
14, 13, 16, 24, 40, 57, 69, 56,
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68, 109, 103, 77,
24, 35, 55, 64, 81, 104, 113, 92,
49, 64, 78, 87, 103, 121, 120, 101,
72, 92, 95, 98, 112, 100, 103, 99 };
*/
// Quantization matrix which does 90% quantization
float quant[64] = {3, 2, 2, 3, 5, 8, 10, 12,
2, 2, 3, 4, 5, 12, 12, 11,
3, 3, 3, 5, 8, 11, 14, 11,
3, 3, 4, 6, 10, 17, 16, 12,
4, 4, 7, 11, 14, 22, 21, 15,
5, 7, 11, 13, 16, 12, 23, 18,
10, 13, 16, 17, 21, 24, 24, 21,
14, 18, 19, 20, 22, 20, 20, 20};
/*
// Quantization matrix which does 10% quantization
float quant[64] = {80, 60, 50, 80, 120, 200, 255, 255,
55, 60, 70, 95, 130, 255, 255, 255,
70, 65, 80, 120, 200, 255, 255, 255,
70, 85, 110, 145, 255, 255, 255, 255,
90, 110, 185, 255, 255, 255, 255, 255,
120, 175, 255, 255, 255, 255, 255, 255,
245, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255 };
*/
// PROCESS RED CHANNEL
  // Translating the pixel values from [0, 255] range to [-128, 127] range
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
red_input[i] = indataset[start_index + pixel_index].red;
red_input[i] -= 128;
}
// Computation of the discrete cosine transform of the image section of size
// 8x8 for red values
MatrixMultiply(dct, red_input, temp);
MatrixMultiply(temp, dctinv, interim);
// Computation of quantization phase using the quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] / quant[i]) + 0.5f);
// Computation of dequantizing phase using the same above quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] * quant[i]) + 0.5f);
// Computation of Inverse Discrete Cosine Transform (IDCT)
MatrixMultiply(dctinv, interim, temp);
MatrixMultiply(temp, dct, product);
  // Translating the pixel values from [-128, 127] range to [0, 255] range
// and writing to output image data
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
float temp = (product[i] + 128);
outdataset[start_index + pixel_index].red =
(temp > 255.f) ? 255 : (unsigned char)temp;
}
// PROCESS BLUE CHANNEL
  // Translating the pixel values from [0, 255] range to [-128, 127] range
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
blue_input[i] = indataset[start_index + pixel_index].blue;
blue_input[i] -= 128;
}
// Computation of the discrete cosine transform of the image section of size
// 8x8 for blue values
MatrixMultiply(dct, blue_input, temp);
MatrixMultiply(temp, dctinv, interim);
// Computation of quantization phase using the quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] / quant[i]) + 0.5f);
// Computation of dequantizing phase using the same above quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] * quant[i]) + 0.5f);
// Computation of Inverse Discrete Cosine Transform (IDCT)
MatrixMultiply(dctinv, interim, temp);
MatrixMultiply(temp, dct, product);
  // Translating the pixel values from [-128, 127] range to [0, 255] range
// and writing to output image data
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
float temp = product[i] + 128;
outdataset[start_index + pixel_index].blue =
(temp > 255.f) ? 255 : (unsigned char)temp;
}
// PROCESS GREEN CHANNEL
  // Translating the pixel values from [0, 255] range to [-128, 127] range
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
green_input[i] = indataset[start_index + pixel_index].green;
green_input[i] -= 128;
}
// Computation of the discrete cosine transform of the image section of size
// 8x8 for green values
MatrixMultiply(dct, green_input, temp);
MatrixMultiply(temp, dctinv, interim);
// Computation of quantization phase using the quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] / quant[i]) + 0.5f);
// Computation of dequantizing phase using the same above quantization matrix
for (int i = 0; i < block_size; ++i)
interim[i] = sycl::floor((interim[i] * quant[i]) + 0.5f);
// Computation of Inverse Discrete Cosine Transform (IDCT)
MatrixMultiply(dctinv, interim, temp);
MatrixMultiply(temp, dct, product);
  // Translating the pixel values from the [-128, 127] range to the [0, 255]
  // range and writing to the output image data
for (int i = 0; i < block_size; ++i) {
int pixel_index = i / block_dims * width + i % block_dims;
float temp = product[i] + 128;
outdataset[start_index + pixel_index].green =
(temp > 255.f) ? 255 : (unsigned char)temp;
}
}
// Breaks the image into 8x8 blocks to process DCT
void ProcessImage(rgb* indataset, rgb* outdataset, int width, int height) {
sycl::queue q(default_selector_v, exception_handler);
std::cout << "Running on "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
try {
int image_size = width * height;
float dct[block_size], dctinv[block_size];
// Creation of 8x8 DCT matrix
CreateDCT(dct);
// Creating a transpose of DCT matrix
MatrixTranspose(dct, dctinv);
buffer indata_buf(indataset, range<1>(image_size));
buffer outdata_buf(outdataset, range<1>(image_size));
buffer dct_buf(dct, range<1>(block_size));
buffer dctinv_buf(dctinv, range<1>(block_size));
q.submit([&](handler& h) {
auto i_acc = indata_buf.get_access(h,read_only);
auto o_acc = outdata_buf.get_access(h);
auto d_acc = dct_buf.get_access(h,read_only);
auto di_acc = dctinv_buf.get_access(h,read_only);
// Processes individual 8x8 chunks in parallel
h.parallel_for(
range<2>(width / block_dims, height / block_dims), [=](auto idx) {
int start_index = idx[0] * block_dims + idx[1] * block_dims * width;
ProcessBlock(i_acc.get_pointer(), o_acc.get_pointer(),
d_acc.get_pointer(), di_acc.get_pointer(), start_index,
width);
});
});
q.wait_and_throw();
  } catch (sycl::exception const &e) {
std::cout << "SYCL exception caught: " << e.what() << "\n";
exit(1);
}
}
// Reads the input .bmp file, invokes the image processing routine on its
// contents, and writes the result to the output .bmp file.
int ReadProcessWrite(char* input, char* output) {
double timersecs;
#ifdef PERF_NUM
double avg_timersecs = 0;
#endif
// Read in the data from the input image file
int image_width = 0, image_height = 0, num_channels = 0;
rgb* indata = (rgb*)stbi_load(input, &image_width, &image_height,
&num_channels, STBI_rgb);
if (!indata) {
std::cout << "The input file could not be opened. Program will now exit\n";
return 1;
} else if (num_channels != 3) {
std::cout
<< "The input file must be an RGB bmp image. Program will now exit\n";
return 1;
} else if (image_width % block_dims != 0 || image_height % block_dims != 0) {
std::cout
<< "The input image must have dimensions which are a multiple of 8\n";
return 1;
}
std::cout << "Filename: " << input << " W: " << image_width
<< " H: " << image_height << "\n\n";
rgb* outdata = (rgb*)malloc(image_width * image_height * sizeof(rgb));
// Invoking the DCT/Quantization API which does some manipulation on the
// bitmap data read from the input .bmp file
#ifdef PERF_NUM
std::cout << "Run all tests...\n\n";
for (int j = 0; j < num_tests; ++j) {
#endif
std::cout << "Start image processing with offloading to GPU...\n";
{
TimeInterval t;
ProcessImage(indata, outdata, image_width, image_height);
timersecs = t.Elapsed();
}
std::cout << "--The processing time is " << timersecs << " seconds\n\n";
#ifdef PERF_NUM
avg_timersecs += timersecs;
}
#endif
stbi_write_bmp(output, image_width, image_height, 3, outdata);
std::cout << "DCT successfully completed on the device.\n"
"The processed image has been written to " << output << "\n";
#ifdef PERF_NUM
std::cout << "\nAverage time for image processing:\n";
std::cout << "--The average processing time was "
<< avg_timersecs / (float)num_tests << " seconds\n";
#endif
// Freeing dynamically allocated memory
stbi_image_free(indata);
std::free(outdata);
return 0;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Program usage is <modified_program> <inputfile.bmp> "
"<outputfile.bmp>\n";
return 1;
}
return ReadProcessWrite(argv[1], argv[2]);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/SparseLinearAlgebra/merge-spmv/src/spmv.cpp | //==============================================================
// This sample provides a parallel implementation of a merge based sparse matrix
// and vector multiplication algorithm using SYCL. The input matrix is in
// compressed sparse row format.
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
#include <map>
#include <set>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace std;
using namespace sycl;
// n x n sparse matrix.
constexpr int n = 100 * 1000;
// Number of non zero values in sparse matrix.
constexpr int nonzero = 2 * 1000 * 1000;
// Maximum value of an element in the matrix.
constexpr int max_value = 100;
// Number of repetitions.
constexpr int repetitions = 16;
// Compressed Sparse Row (CSR) representation for sparse matrix.
//
// Example: The following 4 x 4 sparse matrix
//
// a 0 0 0
// b c 0 0
// 0 0 0 d
// 0 0 e f
//
// has 6 non zero elements in it:
//
// Index Row Column Value
// 0 0 0 a
// 1 1 0 b
// 2 1 1 c
// 3 2 3 d
// 4 3 2 e
// 5 3 3 f
//
// Its CSR representation has three components:
// - Nonzero values: a, b, c, d, e, f
// - Column indices: 0, 0, 1, 3, 2, 3
// - Row offsets: 0, 1, 3, 4, 6
//
// Non zero values and their column indices directly correspond to the entries
// in the above table.
//
// Row offsets are offsets in the values array for the first non zero element of
// each row of the matrix.
//
// Row NonZeros NonZeros_SeenBefore
// 0 1 0
// 1 2 1
// 2 1 3
// 3 2 4
// - - 6
typedef struct {
int *row_offsets;
int *column_indices;
float *values;
} CompressedSparseRow;
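// Illustrative sketch only (not used by this sample): the 4 x 4 example above,
// hard-coded in CSR form and multiplied by a dense vector x. The numeric
// values standing in for a..f are made up for the demonstration.
static inline void CsrExampleSpmv(const float x[4], float y[4]) {
  const int row_offsets[5] = {0, 1, 3, 4, 6};
  const int column_indices[6] = {0, 0, 1, 3, 2, 3};
  const float values[6] = {1, 2, 3, 4, 5, 6};  // stand-ins for a..f
  for (int row = 0; row < 4; row++) {
    y[row] = 0;
    // All non zeros of 'row' live in values[row_offsets[row] .. row_offsets[row + 1]).
    for (int k = row_offsets[row]; k < row_offsets[row + 1]; k++)
      y[row] += values[k] * x[column_indices[k]];
  }
}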
// Allocate unified shared memory for storing matrix and vectors so that they
// are accessible from both the CPU and the device (e.g., a GPU).
bool AllocateMemory(queue &q, int thread_count, CompressedSparseRow *matrix,
float **x, float **y_sequential, float **y_parallel,
int **carry_row, float **carry_value) {
matrix->row_offsets = malloc_shared<int>(n + 1, q);
matrix->column_indices = malloc_shared<int>(nonzero, q);
matrix->values = malloc_shared<float>(nonzero, q);
*x = malloc_shared<float>(n, q);
*y_sequential = malloc_shared<float>(n, q);
*y_parallel = malloc_shared<float>(n, q);
*carry_row = malloc_shared<int>(thread_count, q);
*carry_value = malloc_shared<float>(thread_count, q);
return (matrix->row_offsets != nullptr) &&
(matrix->column_indices != nullptr) && (matrix->values != nullptr) &&
(*x != nullptr) && (*y_sequential != nullptr) &&
(*y_parallel != nullptr) && (*carry_row != nullptr) &&
(*carry_value != nullptr);
}
// Free allocated unified shared memory.
void FreeMemory(queue &q, CompressedSparseRow *matrix, float *x,
float *y_sequential, float *y_parallel, int *carry_row,
float *carry_value) {
if (matrix->row_offsets != nullptr) free(matrix->row_offsets, q);
if (matrix->column_indices != nullptr) free(matrix->column_indices, q);
if (matrix->values != nullptr) free(matrix->values, q);
if (x != nullptr) free(x, q);
if (y_sequential != nullptr) free(y_sequential, q);
if (y_parallel != nullptr) free(y_parallel, q);
if (carry_row != nullptr) free(carry_row, q);
if (carry_value != nullptr) free(carry_value, q);
}
// Initialize inputs: sparse matrix and vector.
void InitializeSparseMatrixAndVector(CompressedSparseRow *matrix, float *x) {
map<int, set<int>> indices;
// Randomly choose a set of elements (i.e., row and column pairs) of the
// matrix. These elements will have non zero values.
for (int k = 0; k < nonzero; k++) {
int i = rand() % n;
int j = rand() % n;
if (indices.find(i) == indices.end()) {
indices[i] = {j};
} else if (indices[i].find(j) == indices[i].end()) {
indices[i].insert(j);
} else {
k--;
}
}
int offset = 0;
// Randomly choose non zero values of the sparse matrix.
for (int i = 0; i < n; i++) {
matrix->row_offsets[i] = offset;
if (indices.find(i) != indices.end()) {
set<int> &cols = indices[i];
for (auto it = cols.cbegin(); it != cols.cend(); ++it, ++offset) {
matrix->column_indices[offset] = *it;
matrix->values[offset] = rand() % max_value + 1;
}
}
}
matrix->row_offsets[n] = nonzero;
// Initialize input vector.
for (int i = 0; i < n; i++) {
x[i] = 1;
}
}
// A sequential implementation of merge based sparse matrix and vector
// multiplication algorithm.
//
// Both the row offsets and the indices into the values array can be thought
// of as sorted arrays. At a conceptual level, the progression of the
// computation is similar to merging two sorted arrays.
//
// When a row offset and an index of the values array are equal (denoted as '?'
// below), the algorithm starts computing the value of a new element of the
// result vector.
//
// The algorithm continues to accumulate for the same element of the result
// vector otherwise (denoted as '*' below).
//
// Row indices -> 0 1 2 3
// Row offsets -> 0 1 3 4 6
//
// ? 0 a
// ? 1 b
// * 2 c
// ? 3 d
// ? 4 e
// * 5 f
//
// ^ ^
// | |
// | Non zero values
// |
// Indices of values array
void MergeSparseMatrixVector(CompressedSparseRow *matrix, float *x, float *y) {
int row_index = 0;
int val_index = 0;
y[row_index] = 0;
while (val_index < nonzero) {
if (val_index < matrix->row_offsets[row_index + 1]) {
// Accumulate and move down.
y[row_index] +=
matrix->values[val_index] * x[matrix->column_indices[val_index]];
val_index++;
} else {
// Move right.
row_index++;
y[row_index] = 0;
}
}
for (row_index++; row_index < n; row_index++) {
y[row_index] = 0;
}
}
// Merge Coordinate.
typedef struct {
int row_index;
int val_index;
} MergeCoordinate;
// Given linear position on the merge path, find two dimensional merge
// coordinate (row index and value index pair) on the path.
MergeCoordinate MergePathBinarySearch(int diagonal, int *row_offsets) {
// Diagonal search range (in row index space).
int row_min = (diagonal - nonzero > 0) ? (diagonal - nonzero) : 0;
int row_max = (diagonal < n) ? diagonal : n;
// 2D binary search along the diagonal search range.
while (row_min < row_max) {
int pivot = (row_min + row_max) >> 1;
if (row_offsets[pivot + 1] <= diagonal - pivot - 1) {
// Keep top right half of diagonal range.
row_min = pivot + 1;
} else {
// Keep bottom left half of diagonal range.
row_max = pivot;
}
}
MergeCoordinate coordinate;
coordinate.row_index = (row_min < n) ? row_min : n;
coordinate.val_index = diagonal - row_min;
return coordinate;
}
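// A worked example, substituting n = 4, nonzero = 6 and the row offsets
// {0, 1, 3, 4, 6} from the CSR comment above: MergePathBinarySearch(3, ...)
// narrows [0, 3) to [0, 1) to [1, 1) and returns {row_index = 1, val_index = 2},
// i.e. path position 3 lies in row 1 after consuming the values a and b.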
// The parallel implementation of the sparse matrix, vector multiplication
// algorithm uses this function as a subroutine. Each available thread calls
// this function with identical inputs, except the thread identifier (TID) is
// unique. Having a unique TID, each thread independently identifies its own,
// non-overlapping share of the overall work. More importantly, each thread,
// except possibly the last one, handles the same amount of work. This
// implementation is an extension of the sequential implementation of the
// merge based sparse matrix, vector multiplication algorithm. It first
// identifies its scope of the merge and then performs only the amount of work
// that belongs to this thread in the cohort of threads.
void MergeSparseMatrixVectorThread(int thread_count, int tid,
CompressedSparseRow matrix, float *x,
float *y, int *carry_row,
float *carry_value) {
int path_length = n + nonzero; // Merge path length.
int items_per_thread = (path_length + thread_count - 1) /
thread_count; // Merge items per thread.
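  // Example, assuming a hypothetical 1,024 threads: path_length is
  // 100,000 + 2,000,000, so items_per_thread = ceil(2,100,000 / 1,024) = 2,051.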
// Find start and end merge path coordinates for this thread.
int diagonal = ((items_per_thread * tid) < path_length)
? (items_per_thread * tid)
: path_length;
int diagonal_end = ((diagonal + items_per_thread) < path_length)
? (diagonal + items_per_thread)
: path_length;
MergeCoordinate path = MergePathBinarySearch(diagonal, matrix.row_offsets);
MergeCoordinate path_end =
MergePathBinarySearch(diagonal_end, matrix.row_offsets);
// Consume items-per-thread merge items.
float dot_product = 0;
for (int i = 0; i < items_per_thread; i++) {
if (path.val_index < matrix.row_offsets[path.row_index + 1]) {
// Accumulate and move down.
dot_product += matrix.values[path.val_index] *
x[matrix.column_indices[path.val_index]];
path.val_index++;
} else {
// Output row total and move right.
y[path.row_index] = dot_product;
dot_product = 0;
path.row_index++;
}
}
// Save carry.
carry_row[tid] = path_end.row_index;
carry_value[tid] = dot_product;
}
// This is the parallel implementation of the merge based sparse matrix and
// vector multiplication algorithm. It works in three steps:
// 1. Initialize elements of the output vector to zero.
// 2. Multiply sparse matrix and vector.
// 3. Fix up rows of the output vector that spanned across multiple threads.
// The first two steps are parallel. They utilize all available processors
// (threads). The last step performs a reduction. It could be parallel as well
// but is kept sequential for the following reasons:
// 1. The number of operations in this step is proportional to the number of
//    processors (threads).
// 2. The number of available threads is not very large.
void MergeSparseMatrixVector(queue &q, int compute_units, int work_group_size,
CompressedSparseRow matrix, float *x, float *y,
int *carry_row, float *carry_value) {
int thread_count = compute_units * work_group_size;
// Initialize output vector.
q.parallel_for<class InitializeVector>(
nd_range<1>(compute_units * work_group_size, work_group_size),
[=](nd_item<1> item) {
auto global_id = item.get_global_id(0);
auto items_per_thread = (n + thread_count - 1) / thread_count;
auto start = global_id * items_per_thread;
auto stop = start + items_per_thread;
for (auto i = start; (i < stop) && (i < n); i++) {
y[i] = 0;
}
});
q.wait();
// Multiply sparse matrix and vector.
q.parallel_for<class MergeCsrMatrixVector>(
nd_range<1>(compute_units * work_group_size, work_group_size),
[=](nd_item<1> item) {
auto global_id = item.get_global_id(0);
MergeSparseMatrixVectorThread(thread_count, global_id, matrix, x, y,
carry_row, carry_value);
});
q.wait();
// Carry fix up for rows spanning multiple threads.
for (int tid = 0; tid < thread_count - 1; tid++) {
if (carry_row[tid] < n) {
y[carry_row[tid]] += carry_value[tid];
}
}
}
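// Example of the carry fix up above: if one matrix row spans threads t and
// t + 1, thread t ends mid-row and records its partial dot product in
// carry_value[t], with carry_row[t] naming the row; thread t + 1 writes the
// remainder of the row to y directly, and the loop above adds the carried
// partial back in.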
// Check if two input vectors are equal.
bool VerifyVectorsAreEqual(float *u, float *v) {
for (int i = 0; i < n; i++) {
if (fabs(u[i] - v[i]) > 1E-06) {
return false;
}
}
return true;
}
int main() {
// Sparse matrix.
CompressedSparseRow matrix;
// Input vector.
float *x;
// Vector: result of sparse matrix and vector multiplication.
float *y_sequential;
float *y_parallel;
// Auxiliary storage for parallel computation.
int *carry_row;
float *carry_value;
try {
queue q{default_selector_v};
auto device = q.get_device();
cout << "Device: " << device.get_info<info::device::name>() << "\n";
// Find max number of compute/execution units and max number of threads per
// compute unit.
int compute_units = device.get_info<info::device::max_compute_units>();
int work_group_size = device.get_info<info::device::max_work_group_size>();
    int thread_count = compute_units * work_group_size;
    // Detect overflow: check the product in size_t, since the int
    // multiplication above may already have wrapped around.
    if ((size_t)compute_units * (size_t)work_group_size > (size_t)INT_MAX) {
      // Scale down work_group_size so the thread count stays within integer range.
      work_group_size /= 2;
      thread_count = compute_units * work_group_size;
    }
cout << "Compute units: " << compute_units << "\n";
cout << "Work group size: " << work_group_size << "\n";
// Allocate memory.
if (!AllocateMemory(q, compute_units * work_group_size, &matrix, &x,
&y_sequential, &y_parallel, &carry_row, &carry_value)) {
cout << "Memory allocation failure.\n";
FreeMemory(q, &matrix, x, y_sequential, y_parallel, carry_row,
carry_value);
return -1;
}
// Initialize.
InitializeSparseMatrixAndVector(&matrix, x);
// Warm up the JIT.
MergeSparseMatrixVector(q, compute_units, work_group_size, matrix, x,
y_parallel, carry_row, carry_value);
// Time executions.
double elapsed_s = 0;
double elapsed_p = 0;
int i;
cout << "Repeating " << repetitions << " times to measure run time ...\n";
for (i = 0; i < repetitions; i++) {
cout << "Iteration: " << (i + 1) << "\n";
// Sequential compute.
dpc_common::TimeInterval timer_s;
MergeSparseMatrixVector(&matrix, x, y_sequential);
elapsed_s += timer_s.Elapsed();
// Parallel compute.
dpc_common::TimeInterval timer_p;
MergeSparseMatrixVector(q, compute_units, work_group_size, matrix, x,
y_parallel, carry_row, carry_value);
elapsed_p += timer_p.Elapsed();
// Verify two results are equal.
if (!VerifyVectorsAreEqual(y_sequential, y_parallel)) {
cout << "Failed to correctly compute!\n";
break;
}
}
if (i == repetitions) {
cout << "Successfully completed sparse matrix and vector "
"multiplication!\n";
elapsed_s /= repetitions;
elapsed_p /= repetitions;
cout << "Time sequential: " << elapsed_s << " sec\n";
cout << "Time parallel: " << elapsed_p << " sec\n";
}
FreeMemory(q, &matrix, x, y_sequential, y_parallel, carry_row, carry_value);
} catch (std::exception const &e) {
cout << "An exception is caught while computing on device.\n";
terminate();
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/OpenCLInterop/src/sycl_with_opencl_objects.dp.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/opencl.h>
#include <stdio.h>
#include <sycl/sycl.hpp>
#include <sycl/backend/opencl.hpp>
using namespace sycl;
constexpr int MAX_SOURCE_SIZE = 0x100000;
constexpr int N = 1024;
int main(int argc, char **argv) {
size_t bytes = sizeof(float) * N;
cl_float *host_a = (cl_float *)malloc(bytes);
cl_float *host_b = (cl_float *)malloc(bytes);
cl_float *host_c = (cl_float *)malloc(bytes);
for (int i = 0; i < N; ++i) {
host_a[i] = i;
host_b[i] = i * 2;
}
FILE *fp;
char *source_str;
size_t source_size;
fp = fopen("vector_add_kernel.cl", "r");
  if (!fp) {
    std::cerr << "Failed to load kernel file." << std::endl;
    return 1;
  }
source_str = (char *)malloc(MAX_SOURCE_SIZE);
source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
fclose(fp);
std::cout << "Kernel Loading Done" << std::endl;
// Get platform and device information
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(0, NULL, &ret_num_platforms);
std::cout << "Platforms Found: " << ret_num_platforms << std::endl;
  cl_platform_id *ocl_platforms =
      (cl_platform_id *)malloc(ret_num_platforms * sizeof(cl_platform_id));
ret = clGetPlatformIDs(ret_num_platforms, ocl_platforms, NULL);
// Set Platform to Use
int platform_index = 0;
platform sycl_platform = make_platform<backend::opencl>(ocl_platforms[platform_index]);
std::cout << "Using Platform: "
<< sycl_platform.get_info<info::platform::name>() << std::endl;
ret = clGetDeviceIDs(ocl_platforms[platform_index], CL_DEVICE_TYPE_ALL, 1,
&device_id, &ret_num_devices);
std::cout << "Devices Found: " << ret_num_devices << std::endl;
// Create an OpenCL context and queue
cl_context ocl_context =
clCreateContext(NULL, 1, &device_id, NULL, NULL, &ret);
cl_command_queue ocl_queue =
clCreateCommandQueueWithProperties(ocl_context, device_id, 0, &ret);
// Create a program from the kernel source, and build it
cl_program ocl_program =
clCreateProgramWithSource(ocl_context, 1, (const char **)&source_str,
(const size_t *)&source_size, &ret);
ret = clBuildProgram(ocl_program, 1, &device_id, NULL, NULL, NULL);
// OpenCL Kernel and Memory Objects
cl_kernel ocl_kernel = clCreateKernel(ocl_program, "vector_add", &ret);
cl_mem ocl_buf_a =
clCreateBuffer(ocl_context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
cl_mem ocl_buf_b =
clCreateBuffer(ocl_context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
  cl_mem ocl_buf_c =
      clCreateBuffer(ocl_context, CL_MEM_WRITE_ONLY, bytes, NULL, NULL);
clEnqueueWriteBuffer(ocl_queue, ocl_buf_a, CL_TRUE, 0, bytes, host_a, 0, NULL,
NULL);
clEnqueueWriteBuffer(ocl_queue, ocl_buf_b, CL_TRUE, 0, bytes, host_b, 0, NULL,
NULL);
{ // SYCL Application Scope
// Construct SYCL versions of the context, queue, kernel, and buffers
context sycl_context = make_context<backend::opencl>(ocl_context);
queue sycl_queue = make_queue<backend::opencl>(ocl_queue, sycl_context);
std::cout << "Device: "
<< sycl_queue.get_device().get_info<info::device::name>()
<< std::endl;
kernel sycl_kernel = make_kernel<backend::opencl>(ocl_kernel, sycl_context);
buffer<int, 1> sycl_buf_a = make_buffer<backend::opencl, int>(ocl_buf_a, sycl_context);
buffer<int, 1> sycl_buf_b = make_buffer<backend::opencl, int>(ocl_buf_b, sycl_context);
buffer<int, 1> sycl_buf_c = make_buffer<backend::opencl, int>(ocl_buf_c, sycl_context);
sycl_queue.submit([&](handler &h) {
// Create accessors for each of the buffers
accessor a_accessor(sycl_buf_a, h, read_only);
accessor b_accessor(sycl_buf_b, h, read_only);
accessor c_accessor(sycl_buf_c, h, write_only);
// Map kernel arguments to accessors
h.set_args(a_accessor, b_accessor, c_accessor);
// Launch Kernel
h.parallel_for(range<1>(N), sycl_kernel);
});
}
// Read buffer content back to host array
clEnqueueReadBuffer(ocl_queue, ocl_buf_c, CL_TRUE, 0, bytes, host_c, 0, NULL,
NULL);
for (int i = 0; i < N; ++i) {
if (host_c[i] != i * 3) {
std::cout << "Failed!" << std::endl;
return -1;
}
}
std::cout << "Passed!" << std::endl;
ret = clReleaseCommandQueue(ocl_queue);
ret = clReleaseKernel(ocl_kernel);
ret = clReleaseProgram(ocl_program);
ret = clReleaseMemObject(ocl_buf_a);
ret = clReleaseMemObject(ocl_buf_b);
ret = clReleaseMemObject(ocl_buf_c);
ret = clReleaseContext(ocl_context);
free(host_a);
free(host_b);
free(host_c);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/MandelbrotSDL.cpp | #include <SDL2/SDL.h>
#include <sycl/sycl.hpp>
#include <iomanip>
#include <iostream>
#include "Mandel.hpp"
using namespace sycl;
using namespace std;
constexpr int windowWidth = 640;
constexpr int windowHeight = 480;
void ShowDevice(queue& q) {
// Print device info.
auto device = q.get_device();
auto p_name = device.get_platform().get_info<info::platform::name>();
cout << std::setw(20) << "Platform Name: " << p_name << "\n";
auto p_version = device.get_platform().get_info<info::platform::version>();
cout << std::setw(20) << "Platform Version: " << p_version << "\n";
auto d_name = device.get_info<info::device::name>();
cout << std::setw(20) << "Device Name: " << d_name << "\n";
auto max_work_group = device.get_info<info::device::max_work_group_size>();
cout << std::setw(20) << "Max Work Group: " << max_work_group << "\n";
auto max_compute_units = device.get_info<info::device::max_compute_units>();
cout << std::setw(20) << "Max Compute Units: " << max_compute_units << "\n\n";
}
// Function for handling SDL events.
bool handleEvents(const SDL_Event& event, Mandelbrot& mandel)
{
static bool isMousePressed = false;
switch (event.type) {
case SDL_QUIT:
return true;
case SDL_MOUSEBUTTONDOWN:
isMousePressed = true;
return false;
case SDL_MOUSEBUTTONUP:
isMousePressed = false;
return false;
case SDL_MOUSEMOTION:
if(isMousePressed)
{
      // Convert the relative movement of the mouse from pixels to the 0.0 - 1.0 range
double x = event.motion.xrel / (double)windowWidth;
double y = event.motion.yrel / (double)windowHeight;
mandel.pan(x, y);
}
return false;
  case SDL_MOUSEWHEEL: {
    // Get the mouse position on screen in pixels and map it to the 0.0 - 1.0 range
    int posx, posy;
    SDL_GetMouseState(&posx, &posy);
    double x = posx / (double)windowWidth;
    double y = posy / (double)windowHeight;
    mandel.scale(x, y, -event.wheel.preciseY);
    return false;
  }
}
return false;
}
#ifdef _WIN32
int wmain()
#elif __linux__
int main()
#endif
{
// Texture size
int width = 1024;
int height = 1024;
int maxIterations = 50;
  // Initialize SDL and create required structs.
SDL_Init(SDL_INIT_VIDEO);
auto window = SDL_CreateWindow("Mandelbrot", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, windowWidth, windowHeight, 0);
auto renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
auto texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STREAMING, width, height);
// Calculate number of bytes between each row of pixels.
int pitch = sizeof(uint32_t) * width;
// Create Queue with default selector.
queue q(default_selector_v);
ShowDevice(q);
// Create mandelbrot class with width x height texture size, maxIterations, and rendered range from -2 to 2 in X and Y direction
Mandelbrot *mandelbrot;
auto device = q.get_device();
  // If the device does not support fp64, fall back to single precision (float).
if (!device.has(aspect::fp64)) {
std::cout << "Device " << device.get_info<info::device::name>() << " does not support double precision!"
<< " Single precision will be used instead." << std::endl;
mandelbrot = new Mandelbrot(width, height, maxIterations, -2.0f, 2.0f, -2.0f, 2.0f, q);
} else
mandelbrot = new Mandelbrot(width, height, maxIterations, -2.0, 2.0, -2.0, 2.0, q);
uint32_t* pixels;
bool quit = false;
SDL_Event event;
while (!quit)
{
// Wait for SDL event.
SDL_WaitEvent(&event);
quit = handleEvents(event, *mandelbrot);
// Lock texture and update the pixels pointer from where SDL will read data.
SDL_LockTexture(texture, NULL, (void**)&pixels, &pitch);
// Calculate mandelbrot and write pixels to pixel pointer
mandelbrot->Calculate(pixels);
// Unlock the texture.
SDL_UnlockTexture(texture);
// Copy texture to renderer.
SDL_RenderCopy(renderer, texture, NULL, NULL);
    // Present the rendered frame.
SDL_RenderPresent(renderer);
}
// Destroy window and clean the SDL.
SDL_DestroyWindow(window);
SDL_Quit();
delete mandelbrot;
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/Mandel.hpp | #pragma once
#include <complex>
#include <sycl/sycl.hpp>
using namespace sycl;
using namespace std;
template <typename T> std::complex<T> complex_square(std::complex<T> c)
{
return std::complex<T>(c.real() * c.real() - c.imag() * c.imag(), c.real() * c.imag() * 2);
}
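// For example, complex_square(std::complex<float>(1, 2)) yields (-3, 4),
// matching (1 + 2i)^2 = 1 + 4i + 4i^2 = -3 + 4i.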
template <typename T> class MandelParameters {
using ComplexF = std::complex<T>;
public:
MandelParameters() {}
MandelParameters(int width, int height, int maxIterations, T xmin, T xmax, T ymin, T ymax):
width(width),
height(height),
maxIterations(maxIterations),
xmin(xmin),
xmax(xmax),
ymin(ymin),
ymax(ymax) {}
  // Scale from 0..width to xmin..xmax
  T ScaleRow(int i) const { return xmin + (i * (xmax - xmin) / width); }
// Scale from 0..height to ymin..ymax
T ScaleCol(int i) const { return -(ymin + (i * (ymax - ymin) / height)); }
  // The Mandelbrot set consists of the points that do not diverge within maxIterations.
int Point(const ComplexF& c) const {
int count = 0;
ComplexF z = 0;
for (int i = 0; i < maxIterations; ++i) {
auto r = z.real();
auto im = z.imag();
// Leave loop if diverging.
if (((r * r) + (im * im)) >= 4.0f) {
break;
}
// z = z * z + c;
z = complex_square<T>(z) + c;
count++;
}
return count;
}
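  // For instance, Point(ComplexF(0, 0)) returns maxIterations (the origin is
  // in the set), while with maxIterations >= 2, Point(ComplexF(2, 0)) returns
  // 1: z becomes 2 after one step and the next check sees |z|^2 = 4.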
void scale(T xoffset, T yoffset, T scale) {
    // calculate the cursor position in the Mandelbrot space
T x = xmin + (xmax - xmin) * xoffset;
T y = ymin + (ymax - ymin) * yoffset;
// scale the space
xmin *= 1 + 0.02 * scale;
xmax *= 1 + 0.02 * scale;
ymin *= 1 + 0.02 * scale;
ymax *= 1 + 0.02 * scale;
    // calculate the cursor position in the scaled Mandelbrot space
T x2 = xmin + (xmax - xmin) * xoffset;
T y2 = ymin + (ymax - ymin) * yoffset;
    // calculate the offset between the positions before and after scaling
T offset_x = x2 - x;
T offset_y = y2 - y;
// move the space by offset
xmin -= offset_x;
xmax -= offset_x;
ymin -= offset_y;
ymax -= offset_y;
}
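  // Worked example: with xmin = -2, xmax = 2 and the cursor at xoffset = 0.75,
  // the cursor sits at x = 1. One zoom-out step (scale = 1) widens the range
  // to [-2.04, 2.04], where the same offset maps to x2 = 1.02; shifting by the
  // 0.02 difference gives [-2.06, 2.02], which keeps x = 1 under the cursor.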
void pan(T xoffset, T yoffset) {
    // convert the camera movement from the 0 - 1.0 range to a distance in Mandelbrot space.
T w = (xmax - xmin) * xoffset;
T h = (ymax - ymin) * yoffset;
// move the space by offset
xmin -= w;
xmax -= w;
ymin -= h;
ymax -= h;
}
int width;
int height;
int maxIterations;
T xmin;
T xmax;
T ymin;
T ymax;
};
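// Usage sketch (assumed; it mirrors what Mandelbrot::CalculateSP does below):
//
//   MandelParameters<float> p(1024, 1024, 50, -2.f, 2.f, -2.f, 2.f);
//   int iters = p.Point(std::complex<float>(p.ScaleRow(x), p.ScaleCol(y)));
//   // iters == maxIterations suggests the pixel is inside the set.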
class Mandelbrot {
public:
Mandelbrot(int width, int height, int maxIterations, double xmin, double xmax, double ymin, double ymax, queue& q);
Mandelbrot(int width, int height, int maxIterations, float xmin, float xmax, float ymin, float ymax, queue& q);
void Calculate(uint32_t* pixels);
void scale(double xoffset, double yoffset, double scale);
void pan(double xoffset, double yoffset);
MandelParameters<float> getSPParameters() const { return sp_parameters; }
MandelParameters<double> getDPParameters() const { return dp_parameters; }
private:
void CalculateSP(uint32_t* pixels);
void CalculateDP(uint32_t* pixels);
queue& q;
MandelParameters<float> sp_parameters;
MandelParameters<double> dp_parameters;
bool singlePrecision;
};
Mandelbrot::Mandelbrot(int width, int height, int maxIterations, double xmin, double xmax, double ymin, double ymax, queue& q) :
q(q),
dp_parameters(width, height, maxIterations, xmin, xmax, ymin, ymax),
singlePrecision(false)
{
}
Mandelbrot::Mandelbrot(int width, int height, int maxIterations, float xmin, float xmax, float ymin, float ymax, queue& q) :
q(q),
sp_parameters(width, height, maxIterations, xmin, xmax, ymin, ymax),
singlePrecision(true)
{
}
void Mandelbrot::Calculate(uint32_t* pixels) {
if (singlePrecision)
CalculateSP(pixels);
else
    CalculateDP(pixels);
}
void Mandelbrot::CalculateSP(uint32_t* pixels) {
MandelParameters<float> parameters = getSPParameters();
const int width = parameters.width;
const int height = parameters.height;
const int maxIterations = parameters.maxIterations;
buffer pixelsBuf(pixels, range(width * height));
// We submit a command group to the queue.
q.submit([&](handler& h) {
accessor ldata(pixelsBuf, h, write_only, no_init);
// Iterate over image and compute mandel for each point.
h.parallel_for(range<1>(height * width), [=](auto index) {
      // Row-major layout: index = y * width + x.
      int y = index / width;
      int x = index % width;
auto c = std::complex<float>(parameters.ScaleRow(x), parameters.ScaleCol(y));
int value = parameters.Point(c);
float normalized = (1.0f * value) / maxIterations;
ldata[index] = uint32_t(normalized * 0xFFFFFF);
ldata[index] <<= 8;
ldata[index] |= 0xFF;
});
}).wait();
}
void Mandelbrot::CalculateDP(uint32_t* pixels) {
MandelParameters<double> parameters = getDPParameters();
const int width = parameters.width;
const int height = parameters.height;
const int maxIterations = parameters.maxIterations;
buffer pixelsBuf(pixels, range(width * height));
// We submit a command group to the queue.
q.submit([&](handler& h) {
accessor ldata(pixelsBuf, h, write_only, no_init);
// Iterate over image and compute mandel for each point.
h.parallel_for(range<1>(height * width), [=](auto index) {
      // Row-major layout: index = y * width + x.
      int y = index / width;
      int x = index % width;
auto c = std::complex<double>(parameters.ScaleRow(x), parameters.ScaleCol(y));
int value = parameters.Point(c);
double normalized = (1.0 * value) / maxIterations;
ldata[index] = uint32_t(normalized * 0xFFFFFF);
ldata[index] <<= 8;
ldata[index] |= 0xFF;
});
}).wait();
}
void Mandelbrot::scale(double xoffset, double yoffset, double scale)
{
if (singlePrecision)
sp_parameters.scale((float) xoffset, (float) yoffset, (float) scale);
else
dp_parameters.scale(xoffset, yoffset, scale);
}
void Mandelbrot::pan(double xoffset, double yoffset)
{
if (singlePrecision)
sp_parameters.pan((float) xoffset, (float) yoffset);
else
dp_parameters.pan(xoffset, yoffset);
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_hints.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_hints.h
*
* Official documentation for SDL configuration variables
*
* This file contains functions to set and get configuration hints,
* as well as listing each of them alphabetically.
*
* The convention for naming hints is SDL_HINT_X, where "SDL_X" is
* the environment variable that can be used to override the default.
*
* In general these hints are just that - they may or may not be
* supported or applicable on any given platform, but they provide
* a way for an application or user to give the library a hint as
* to how they would like the library to work.
*/
#ifndef SDL_hints_h_
#define SDL_hints_h_
#include "SDL_stdinc.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
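/**
 * A minimal usage sketch (illustrative, not part of this header): hints are
 * plain string key/value pairs, typically set before initializing the
 * subsystem that reads them, e.g.:
 *
 *   SDL_SetHint(SDL_HINT_RENDER_VSYNC, "1");
 *   SDL_Init(SDL_INIT_VIDEO);
 */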
/**
* \brief A variable controlling whether the Android / iOS built-in
* accelerometer should be listed as a joystick device.
*
* This variable can be set to the following values:
* "0" - The accelerometer is not listed as a joystick
* "1" - The accelerometer is available as a 3 axis joystick (the default).
*/
#define SDL_HINT_ACCELEROMETER_AS_JOYSTICK "SDL_ACCELEROMETER_AS_JOYSTICK"
/**
* \brief Specify the behavior of Alt+Tab while the keyboard is grabbed.
*
* By default, SDL emulates Alt+Tab functionality while the keyboard is grabbed
* and your window is full-screen. This prevents the user from getting stuck in
* your application if you've enabled keyboard grab.
*
* The variable can be set to the following values:
* "0" - SDL will not handle Alt+Tab. Your application is responsible
for handling Alt+Tab while the keyboard is grabbed.
* "1" - SDL will minimize your window when Alt+Tab is pressed (default)
*/
#define SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED "SDL_ALLOW_ALT_TAB_WHILE_GRABBED"
/**
* \brief If set to "0" then never set the top most bit on a SDL Window, even if the video mode expects it.
* This is a debugging aid for developers and not expected to be used by end users. The default is "1"
*
* This variable can be set to the following values:
* "0" - don't allow topmost
* "1" - allow topmost
*/
#define SDL_HINT_ALLOW_TOPMOST "SDL_ALLOW_TOPMOST"
/**
* \brief Android APK expansion main file version. Should be a string number like "1", "2" etc.
*
* Must be set together with SDL_HINT_ANDROID_APK_EXPANSION_PATCH_FILE_VERSION.
*
* If both hints were set then SDL_RWFromFile() will look into expansion files
* after a given relative path was not found in the internal storage and assets.
*
* By default this hint is not set and the APK expansion files are not searched.
*/
#define SDL_HINT_ANDROID_APK_EXPANSION_MAIN_FILE_VERSION "SDL_ANDROID_APK_EXPANSION_MAIN_FILE_VERSION"
/**
* \brief Android APK expansion patch file version. Should be a string number like "1", "2" etc.
*
* Must be set together with SDL_HINT_ANDROID_APK_EXPANSION_MAIN_FILE_VERSION.
*
* If both hints were set then SDL_RWFromFile() will look into expansion files
* after a given relative path was not found in the internal storage and assets.
*
* By default this hint is not set and the APK expansion files are not searched.
*/
#define SDL_HINT_ANDROID_APK_EXPANSION_PATCH_FILE_VERSION "SDL_ANDROID_APK_EXPANSION_PATCH_FILE_VERSION"
/**
* \brief A variable to control whether the event loop will block itself when the app is paused.
*
* The variable can be set to the following values:
* "0" - Non blocking.
* "1" - Blocking. (default)
*
* The value should be set before SDL is initialized.
*/
#define SDL_HINT_ANDROID_BLOCK_ON_PAUSE "SDL_ANDROID_BLOCK_ON_PAUSE"
/**
* \brief A variable to control whether SDL will pause audio in background
* (Requires SDL_ANDROID_BLOCK_ON_PAUSE as "Non blocking")
*
* The variable can be set to the following values:
* "0" - Non paused.
* "1" - Paused. (default)
*
* The value should be set before SDL is initialized.
*/
#define SDL_HINT_ANDROID_BLOCK_ON_PAUSE_PAUSEAUDIO "SDL_ANDROID_BLOCK_ON_PAUSE_PAUSEAUDIO"
/**
* \brief A variable to control whether we trap the Android back button to handle it manually.
* This is necessary for the right mouse button to work on some Android devices, or
* to be able to trap the back button for use in your code reliably. If set to true,
* the back button will show up as an SDL_KEYDOWN / SDL_KEYUP pair with a keycode of
* SDL_SCANCODE_AC_BACK.
*
* The variable can be set to the following values:
* "0" - Back button will be handled as usual for system. (default)
* "1" - Back button will be trapped, allowing you to handle the key press
* manually. (This will also let right mouse click work on systems
* where the right mouse button functions as back.)
*
* The value of this hint is used at runtime, so it can be changed at any time.
*/
#define SDL_HINT_ANDROID_TRAP_BACK_BUTTON "SDL_ANDROID_TRAP_BACK_BUTTON"
/**
* \brief Specify an application name.
*
* This hint lets you specify the application name sent to the OS when
* required. For example, this will often appear in volume control applets for
* audio streams, and in lists of applications which are inhibiting the
* screensaver. You should use a string that describes your program ("My Game
* 2: The Revenge")
*
* Setting this to "" or leaving it unset will have SDL use a reasonable
* default: probably the application's name or "SDL Application" if SDL
* doesn't have any better information.
*
* Note that, for audio streams, this can be overridden with
* SDL_HINT_AUDIO_DEVICE_APP_NAME.
*
* On targets where this is not supported, this hint does nothing.
*/
#define SDL_HINT_APP_NAME "SDL_APP_NAME"
/**
* \brief A variable controlling whether controllers used with the Apple TV
* generate UI events.
*
* When UI events are generated by controller input, the app will be
* backgrounded when the Apple TV remote's menu button is pressed, and when the
* pause or B buttons on gamepads are pressed.
*
* More information about properly making use of controllers for the Apple TV
* can be found here:
* https://developer.apple.com/tvos/human-interface-guidelines/remote-and-controllers/
*
* This variable can be set to the following values:
* "0" - Controller input does not generate UI events (the default).
* "1" - Controller input generates UI events.
*/
#define SDL_HINT_APPLE_TV_CONTROLLER_UI_EVENTS "SDL_APPLE_TV_CONTROLLER_UI_EVENTS"
/**
* \brief A variable controlling whether the Apple TV remote's joystick axes
* will automatically match the rotation of the remote.
*
* This variable can be set to the following values:
* "0" - Remote orientation does not affect joystick axes (the default).
* "1" - Joystick axes are based on the orientation of the remote.
*/
#define SDL_HINT_APPLE_TV_REMOTE_ALLOW_ROTATION "SDL_APPLE_TV_REMOTE_ALLOW_ROTATION"
/**
* \brief A variable controlling the audio category on iOS and Mac OS X
*
* This variable can be set to the following values:
*
* "ambient" - Use the AVAudioSessionCategoryAmbient audio category, will be muted by the phone mute switch (default)
* "playback" - Use the AVAudioSessionCategoryPlayback category
*
* For more information, see Apple's documentation:
* https://developer.apple.com/library/content/documentation/Audio/Conceptual/AudioSessionProgrammingGuide/AudioSessionCategoriesandModes/AudioSessionCategoriesandModes.html
*/
#define SDL_HINT_AUDIO_CATEGORY "SDL_AUDIO_CATEGORY"
/**
* \brief Specify an application name for an audio device.
*
* Some audio backends (such as PulseAudio) allow you to describe your audio
* stream. Among other things, this description might show up in a system
* control panel that lets the user adjust the volume on specific audio
* streams instead of using one giant master volume slider.
*
* This hints lets you transmit that information to the OS. The contents of
* this hint are used while opening an audio device. You should use a string
* that describes your program ("My Game 2: The Revenge")
*
* Setting this to "" or leaving it unset will have SDL use a reasonable
* default: this will be the name set with SDL_HINT_APP_NAME, if that hint is
 * set. Otherwise, it'll probably be the application's name or "SDL Application"
* if SDL doesn't have any better information.
*
* On targets where this is not supported, this hint does nothing.
*/
#define SDL_HINT_AUDIO_DEVICE_APP_NAME "SDL_AUDIO_DEVICE_APP_NAME"
/**
* \brief Specify an application name for an audio device.
*
* Some audio backends (such as PulseAudio) allow you to describe your audio
* stream. Among other things, this description might show up in a system
* control panel that lets the user adjust the volume on specific audio
* streams instead of using one giant master volume slider.
*
* This hints lets you transmit that information to the OS. The contents of
* this hint are used while opening an audio device. You should use a string
* that describes your what your program is playing ("audio stream" is
* probably sufficient in many cases, but this could be useful for something
* like "team chat" if you have a headset playing VoIP audio separately).
*
* Setting this to "" or leaving it unset will have SDL use a reasonable
* default: "audio stream" or something similar.
*
* On targets where this is not supported, this hint does nothing.
*/
#define SDL_HINT_AUDIO_DEVICE_STREAM_NAME "SDL_AUDIO_DEVICE_STREAM_NAME"
/**
* \brief Specify an application role for an audio device.
*
* Some audio backends (such as Pipewire) allow you to describe the role of
* your audio stream. Among other things, this description might show up in
* a system control panel or software for displaying and manipulating media
* playback/capture graphs.
*
* This hints lets you transmit that information to the OS. The contents of
* this hint are used while opening an audio device. You should use a string
 * that describes what your program is playing (Game, Music, Movie,
* etc...).
*
* Setting this to "" or leaving it unset will have SDL use a reasonable
* default: "Game" or something similar.
*
* On targets where this is not supported, this hint does nothing.
*/
#define SDL_HINT_AUDIO_DEVICE_STREAM_ROLE "SDL_AUDIO_DEVICE_STREAM_ROLE"
/**
* \brief A variable controlling speed/quality tradeoff of audio resampling.
*
* If available, SDL can use libsamplerate ( http://www.mega-nerd.com/SRC/ )
* to handle audio resampling. There are different resampling modes available
* that produce different levels of quality, using more CPU.
*
* If this hint isn't specified to a valid setting, or libsamplerate isn't
* available, SDL will use the default, internal resampling algorithm.
*
* As of SDL 2.26, SDL_ConvertAudio() respects this hint when libsamplerate is available.
*
* This hint is currently only checked at audio subsystem initialization.
*
* This variable can be set to the following values:
*
* "0" or "default" - Use SDL's internal resampling (Default when not set - low quality, fast)
* "1" or "fast" - Use fast, slightly higher quality resampling, if available
* "2" or "medium" - Use medium quality resampling, if available
* "3" or "best" - Use high quality resampling, if available
*/
#define SDL_HINT_AUDIO_RESAMPLING_MODE "SDL_AUDIO_RESAMPLING_MODE"
/**
* \brief A variable controlling whether SDL updates joystick state when getting input events
*
* This variable can be set to the following values:
*
* "0" - You'll call SDL_JoystickUpdate() manually
* "1" - SDL will automatically call SDL_JoystickUpdate() (default)
*
* This hint can be toggled on and off at runtime.
*/
#define SDL_HINT_AUTO_UPDATE_JOYSTICKS "SDL_AUTO_UPDATE_JOYSTICKS"
/**
* \brief A variable controlling whether SDL updates sensor state when getting input events
*
* This variable can be set to the following values:
*
* "0" - You'll call SDL_SensorUpdate() manually
* "1" - SDL will automatically call SDL_SensorUpdate() (default)
*
* This hint can be toggled on and off at runtime.
*/
#define SDL_HINT_AUTO_UPDATE_SENSORS "SDL_AUTO_UPDATE_SENSORS"
/**
* \brief Prevent SDL from using version 4 of the bitmap header when saving BMPs.
*
* The bitmap header version 4 is required for proper alpha channel support and
* SDL will use it when required. Should this not be desired, this hint can
* force the use of the 40 byte header version which is supported everywhere.
*
* The variable can be set to the following values:
* "0" - Surfaces with a colorkey or an alpha channel are saved to a
* 32-bit BMP file with an alpha mask. SDL will use the bitmap
* header version 4 and set the alpha mask accordingly.
* "1" - Surfaces with a colorkey or an alpha channel are saved to a
* 32-bit BMP file without an alpha mask. The alpha channel data
* will be in the file, but applications are going to ignore it.
*
* The default value is "0".
*/
#define SDL_HINT_BMP_SAVE_LEGACY_FORMAT "SDL_BMP_SAVE_LEGACY_FORMAT"
/**
* \brief Override for SDL_GetDisplayUsableBounds()
*
* If set, this hint will override the expected results for
* SDL_GetDisplayUsableBounds() for display index 0. Generally you don't want
* to do this, but this allows an embedded system to request that some of the
* screen be reserved for other uses when paired with a well-behaved
* application.
*
* The contents of this hint must be 4 comma-separated integers, the first
* is the bounds x, then y, width and height, in that order.
*/
#define SDL_HINT_DISPLAY_USABLE_BOUNDS "SDL_DISPLAY_USABLE_BOUNDS"
/**
* \brief Disable giving back control to the browser automatically
* when running with asyncify
*
* With -s ASYNCIFY, SDL2 calls emscripten_sleep during operations
* such as refreshing the screen or polling events.
*
* This hint only applies to the emscripten platform
*
* The variable can be set to the following values:
* "0" - Disable emscripten_sleep calls (if you give back browser control manually or use asyncify for other purposes)
* "1" - Enable emscripten_sleep calls (the default)
*/
#define SDL_HINT_EMSCRIPTEN_ASYNCIFY "SDL_EMSCRIPTEN_ASYNCIFY"
/**
* \brief override the binding element for keyboard inputs for Emscripten builds
*
* This hint only applies to the emscripten platform
*
* The variable can be one of
* "#window" - The javascript window object (this is the default)
* "#document" - The javascript document object
* "#screen" - the javascript window.screen object
* "#canvas" - the WebGL canvas element
* any other string without a leading # sign applies to the element on the page with that ID.
*/
#define SDL_HINT_EMSCRIPTEN_KEYBOARD_ELEMENT "SDL_EMSCRIPTEN_KEYBOARD_ELEMENT"
/**
* \brief A variable that controls whether Steam Controllers should be exposed using the SDL joystick and game controller APIs
*
* The variable can be set to the following values:
* "0" - Do not scan for Steam Controllers
* "1" - Scan for Steam Controllers (the default)
*
* The default value is "1". This hint must be set before initializing the joystick subsystem.
*/
#define SDL_HINT_ENABLE_STEAM_CONTROLLERS "SDL_ENABLE_STEAM_CONTROLLERS"
/**
* \brief A variable controlling verbosity of the logging of SDL events pushed onto the internal queue.
*
* This variable can be set to the following values, from least to most verbose:
*
* "0" - Don't log any events (default)
* "1" - Log most events (other than the really spammy ones).
* "2" - Include mouse and finger motion events.
* "3" - Include SDL_SysWMEvent events.
*
* This is generally meant to be used to debug SDL itself, but can be useful
* for application developers that need better visibility into what is going
* on in the event queue. Logged events are sent through SDL_Log(), which
* means by default they appear on stdout on most platforms or maybe
* OutputDebugString() on Windows, and can be funneled by the app with
* SDL_LogSetOutputFunction(), etc.
*
* This hint can be toggled on and off at runtime, if you only need to log
* events for a small subset of program execution.
*/
#define SDL_HINT_EVENT_LOGGING "SDL_EVENT_LOGGING"
/**
* \brief A variable controlling whether raising the window should be done more forcefully
*
* This variable can be set to the following values:
* "0" - No forcing (the default)
* "1" - Extra level of forcing
*
* At present, this is only an issue under MS Windows, which makes it nearly impossible to
* programmatically move a window to the foreground, for "security" reasons. See
* http://stackoverflow.com/a/34414846 for a discussion.
*/
#define SDL_HINT_FORCE_RAISEWINDOW "SDL_HINT_FORCE_RAISEWINDOW"
/**
* \brief A variable controlling how 3D acceleration is used to accelerate the SDL screen surface.
*
* SDL can try to accelerate the SDL screen surface by using streaming
* textures with a 3D rendering engine. This variable controls whether and
* how this is done.
*
* This variable can be set to the following values:
* "0" - Disable 3D acceleration
* "1" - Enable 3D acceleration, using the default renderer.
* "X" - Enable 3D acceleration, using X where X is one of the valid rendering drivers. (e.g. "direct3d", "opengl", etc.)
*
* By default SDL tries to make a best guess for each platform whether
* to use acceleration or not.
*/
#define SDL_HINT_FRAMEBUFFER_ACCELERATION "SDL_FRAMEBUFFER_ACCELERATION"
/**
* \brief A variable that lets you manually hint extra gamecontroller db entries.
*
* The variable should be newline delimited rows of gamecontroller config data, see SDL_gamecontroller.h
*
* This hint must be set before calling SDL_Init(SDL_INIT_GAMECONTROLLER)
* You can update mappings after the system is initialized with SDL_GameControllerMappingForGUID() and SDL_GameControllerAddMapping()
*/
#define SDL_HINT_GAMECONTROLLERCONFIG "SDL_GAMECONTROLLERCONFIG"
/**
* \brief A variable that lets you provide a file with extra gamecontroller db entries.
*
* The file should contain lines of gamecontroller config data, see SDL_gamecontroller.h
*
* This hint must be set before calling SDL_Init(SDL_INIT_GAMECONTROLLER)
* You can update mappings after the system is initialized with SDL_GameControllerMappingForGUID() and SDL_GameControllerAddMapping()
*/
#define SDL_HINT_GAMECONTROLLERCONFIG_FILE "SDL_GAMECONTROLLERCONFIG_FILE"
/**
* \brief A variable that overrides the automatic controller type detection
*
* The variable should be comma separated entries, in the form: VID/PID=type
*
* The VID and PID should be hexadecimal with exactly 4 digits, e.g. 0x00fd
*
* The type should be one of:
* Xbox360
* XboxOne
* PS3
* PS4
* PS5
* SwitchPro
*
* This hint affects what driver is used, and must be set before calling SDL_Init(SDL_INIT_GAMECONTROLLER)
*/
#define SDL_HINT_GAMECONTROLLERTYPE "SDL_GAMECONTROLLERTYPE"
/**
* \brief A variable containing a list of devices to skip when scanning for game controllers.
*
* The format of the string is a comma separated list of USB VID/PID pairs
* in hexadecimal form, e.g.
*
* 0xAAAA/0xBBBB,0xCCCC/0xDDDD
*
* The variable can also take the form of @file, in which case the named
* file will be loaded and interpreted as the value of the variable.
*/
#define SDL_HINT_GAMECONTROLLER_IGNORE_DEVICES "SDL_GAMECONTROLLER_IGNORE_DEVICES"
/**
* \brief If set, all devices will be skipped when scanning for game controllers except for the ones listed in this variable.
*
* The format of the string is a comma separated list of USB VID/PID pairs
* in hexadecimal form, e.g.
*
* 0xAAAA/0xBBBB,0xCCCC/0xDDDD
*
* The variable can also take the form of @file, in which case the named
* file will be loaded and interpreted as the value of the variable.
*/
#define SDL_HINT_GAMECONTROLLER_IGNORE_DEVICES_EXCEPT "SDL_GAMECONTROLLER_IGNORE_DEVICES_EXCEPT"
/**
* \brief If set, game controller face buttons report their values according to their labels instead of their positional layout.
*
* For example, on Nintendo Switch controllers, normally you'd get:
*
* (Y)
* (X) (B)
* (A)
*
* but if this hint is set, you'll get:
*
* (X)
* (Y) (A)
* (B)
*
* The variable can be set to the following values:
* "0" - Report the face buttons by position, as though they were on an Xbox controller.
* "1" - Report the face buttons by label instead of position
*
* The default value is "1". This hint may be set at any time.
*/
#define SDL_HINT_GAMECONTROLLER_USE_BUTTON_LABELS "SDL_GAMECONTROLLER_USE_BUTTON_LABELS"
/**
* \brief A variable controlling whether grabbing input grabs the keyboard
*
* This variable can be set to the following values:
* "0" - Grab will affect only the mouse
* "1" - Grab will affect mouse and keyboard
*
* By default SDL will not grab the keyboard so system shortcuts still work.
*/
#define SDL_HINT_GRAB_KEYBOARD "SDL_GRAB_KEYBOARD"
/**
* \brief A variable containing a list of devices to ignore in SDL_hid_enumerate()
*
* For example, to ignore the Shanwan DS3 controller and any Valve controller, you might
* have the string "0x2563/0x0523,0x28de/0x0000"
*/
#define SDL_HINT_HIDAPI_IGNORE_DEVICES "SDL_HIDAPI_IGNORE_DEVICES"
/**
* \brief A variable controlling whether the idle timer is disabled on iOS.
*
* When an iOS app does not receive touches for some time, the screen is
* dimmed automatically. For games where the accelerometer is the only input
* this is problematic. This functionality can be disabled by setting this
* hint.
*
* As of SDL 2.0.4, SDL_EnableScreenSaver() and SDL_DisableScreenSaver()
* accomplish the same thing on iOS. They should be preferred over this hint.
*
* This variable can be set to the following values:
* "0" - Enable idle timer
* "1" - Disable idle timer
*/
#define SDL_HINT_IDLE_TIMER_DISABLED "SDL_IOS_IDLE_TIMER_DISABLED"
/**
* \brief A variable to control whether certain IMEs should handle text editing internally instead of sending SDL_TEXTEDITING events.
*
* The variable can be set to the following values:
* "0" - SDL_TEXTEDITING events are sent, and it is the application's
* responsibility to render the text from these events and
* differentiate it somehow from committed text. (default)
* "1" - If supported by the IME then SDL_TEXTEDITING events are not sent,
* and text that is being composed will be rendered in its own UI.
*/
#define SDL_HINT_IME_INTERNAL_EDITING "SDL_IME_INTERNAL_EDITING"
/**
* \brief A variable to control whether certain IMEs should show native UI components (such as the Candidate List) instead of suppressing them.
*
* The variable can be set to the following values:
 *   "0" - Native UI components are not displayed. (default)
* "1" - Native UI components are displayed.
*/
#define SDL_HINT_IME_SHOW_UI "SDL_IME_SHOW_UI"
/**
* \brief A variable to control if extended IME text support is enabled.
* If enabled then SDL_TextEditingExtEvent will be issued if the text would be truncated otherwise.
* Additionally SDL_TextInputEvent will be dispatched multiple times so that it is not truncated.
*
* The variable can be set to the following values:
* "0" - Legacy behavior. Text can be truncated, no heap allocations. (default)
* "1" - Modern behavior.
*/
#define SDL_HINT_IME_SUPPORT_EXTENDED_TEXT "SDL_IME_SUPPORT_EXTENDED_TEXT"
/**
* \brief A variable controlling whether the home indicator bar on iPhone X
* should be hidden.
*
* This variable can be set to the following values:
* "0" - The indicator bar is not hidden (default for windowed applications)
* "1" - The indicator bar is hidden and is shown when the screen is touched (useful for movie playback applications)
* "2" - The indicator bar is dim and the first swipe makes it visible and the second swipe performs the "home" action (default for fullscreen applications)
*/
#define SDL_HINT_IOS_HIDE_HOME_INDICATOR "SDL_IOS_HIDE_HOME_INDICATOR"
/**
* \brief A variable that lets you enable joystick (and gamecontroller) events even when your app is in the background.
*
* The variable can be set to the following values:
* "0" - Disable joystick & gamecontroller input events when the
* application is in the background.
* "1" - Enable joystick & gamecontroller input events when the
* application is in the background.
*
* The default value is "0". This hint may be set at any time.
*/
#define SDL_HINT_JOYSTICK_ALLOW_BACKGROUND_EVENTS "SDL_JOYSTICK_ALLOW_BACKGROUND_EVENTS"
/**
* \brief A variable controlling whether the HIDAPI joystick drivers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI drivers are not used
* "1" - HIDAPI drivers are used (the default)
*
* This variable is the default for all drivers, but can be overridden by the hints for specific drivers below.
*/
#define SDL_HINT_JOYSTICK_HIDAPI "SDL_JOYSTICK_HIDAPI"
/**
* \brief A variable controlling whether the HIDAPI driver for Nintendo GameCube controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_GAMECUBE "SDL_JOYSTICK_HIDAPI_GAMECUBE"
/**
* \brief A variable controlling whether "low_frequency_rumble" and "high_frequency_rumble" is used to implement
* the GameCube controller's 3 rumble modes, Stop(0), Rumble(1), and StopHard(2)
* this is useful for applications that need full compatibility for things like ADSR envelopes.
* Stop is implemented by setting "low_frequency_rumble" to "0" and "high_frequency_rumble" ">0"
* Rumble is both at any arbitrary value,
* StopHard is implemented by setting both "low_frequency_rumble" and "high_frequency_rumble" to "0"
*
* This variable can be set to the following values:
* "0" - Normal rumble behavior is behavior is used (default)
* "1" - Proper GameCube controller rumble behavior is used
*
*/
#define SDL_HINT_JOYSTICK_GAMECUBE_RUMBLE_BRAKE "SDL_JOYSTICK_GAMECUBE_RUMBLE_BRAKE"
/**
* \brief A variable controlling whether the HIDAPI driver for Nintendo Switch Joy-Cons should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_JOY_CONS "SDL_JOYSTICK_HIDAPI_JOY_CONS"
/**
* \brief A variable controlling whether Nintendo Switch Joy-Con controllers will be combined into a single Pro-like controller when using the HIDAPI driver
*
* This variable can be set to the following values:
* "0" - Left and right Joy-Con controllers will not be combined and each will be a mini-gamepad
* "1" - Left and right Joy-Con controllers will be combined into a single controller (the default)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_COMBINE_JOY_CONS "SDL_JOYSTICK_HIDAPI_COMBINE_JOY_CONS"
/**
* \brief A variable controlling whether Nintendo Switch Joy-Con controllers will be in vertical mode when using the HIDAPI driver
*
* This variable can be set to the following values:
* "0" - Left and right Joy-Con controllers will not be in vertical mode (the default)
* "1" - Left and right Joy-Con controllers will be in vertical mode
*
* This hint must be set before calling SDL_Init(SDL_INIT_GAMECONTROLLER)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_VERTICAL_JOY_CONS "SDL_JOYSTICK_HIDAPI_VERTICAL_JOY_CONS"
/**
* \brief A variable controlling whether the HIDAPI driver for Amazon Luna controllers connected via Bluetooth should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_LUNA "SDL_JOYSTICK_HIDAPI_LUNA"
/**
* \brief A variable controlling whether the HIDAPI driver for Nintendo Online classic controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_NINTENDO_CLASSIC "SDL_JOYSTICK_HIDAPI_NINTENDO_CLASSIC"
/**
* \brief A variable controlling whether the HIDAPI driver for NVIDIA SHIELD controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_SHIELD "SDL_JOYSTICK_HIDAPI_SHIELD"
/**
* \brief A variable controlling whether the HIDAPI driver for PS3 controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI on macOS, and "0" on other platforms.
*
* It is not possible to use this driver on Windows, due to limitations in the default drivers
* installed. See https://github.com/ViGEm/DsHidMini for an alternative driver on Windows.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS3 "SDL_JOYSTICK_HIDAPI_PS3"
/**
* \brief A variable controlling whether the HIDAPI driver for PS4 controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS4 "SDL_JOYSTICK_HIDAPI_PS4"
/**
* \brief A variable controlling whether extended input reports should be used for PS4 controllers when using the HIDAPI driver.
*
* This variable can be set to the following values:
* "0" - extended reports are not enabled (the default)
 *    "1" - extended reports are enabled
*
* Extended input reports allow rumble on Bluetooth PS4 controllers, but
* break DirectInput handling for applications that don't use SDL.
*
* Once extended reports are enabled, they can not be disabled without
* power cycling the controller.
*
* For compatibility with applications written for versions of SDL prior
* to the introduction of PS5 controller support, this value will also
* control the state of extended reports on PS5 controllers when the
* SDL_HINT_JOYSTICK_HIDAPI_PS5_RUMBLE hint is not explicitly set.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS4_RUMBLE "SDL_JOYSTICK_HIDAPI_PS4_RUMBLE"
/**
* \brief A variable controlling whether the HIDAPI driver for PS5 controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS5 "SDL_JOYSTICK_HIDAPI_PS5"
/**
* \brief A variable controlling whether the player LEDs should be lit to indicate which player is associated with a PS5 controller.
*
* This variable can be set to the following values:
* "0" - player LEDs are not enabled
* "1" - player LEDs are enabled (the default)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS5_PLAYER_LED "SDL_JOYSTICK_HIDAPI_PS5_PLAYER_LED"
/**
* \brief A variable controlling whether extended input reports should be used for PS5 controllers when using the HIDAPI driver.
*
* This variable can be set to the following values:
* "0" - extended reports are not enabled (the default)
 *    "1" - extended reports are enabled
*
* Extended input reports allow rumble on Bluetooth PS5 controllers, but
* break DirectInput handling for applications that don't use SDL.
*
* Once extended reports are enabled, they can not be disabled without
* power cycling the controller.
*
* For compatibility with applications written for versions of SDL prior
* to the introduction of PS5 controller support, this value defaults to
* the value of SDL_HINT_JOYSTICK_HIDAPI_PS4_RUMBLE.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_PS5_RUMBLE "SDL_JOYSTICK_HIDAPI_PS5_RUMBLE"
/**
* \brief A variable controlling whether the HIDAPI driver for Google Stadia controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_STADIA "SDL_JOYSTICK_HIDAPI_STADIA"
/**
* \brief A variable controlling whether the HIDAPI driver for Bluetooth Steam Controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used for Steam Controllers, which requires Bluetooth access
* and may prompt the user for permission on iOS and Android.
*
* The default is "0"
*/
#define SDL_HINT_JOYSTICK_HIDAPI_STEAM "SDL_JOYSTICK_HIDAPI_STEAM"
/**
* \brief A variable controlling whether the HIDAPI driver for Nintendo Switch controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_SWITCH "SDL_JOYSTICK_HIDAPI_SWITCH"
/**
* \brief A variable controlling whether the Home button LED should be turned on when a Nintendo Switch Pro controller is opened
*
* This variable can be set to the following values:
* "0" - home button LED is turned off
* "1" - home button LED is turned on
*
* By default the Home button LED state is not changed. This hint can also be set to a floating point value between 0.0 and 1.0 which controls the brightness of the Home button LED.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_SWITCH_HOME_LED "SDL_JOYSTICK_HIDAPI_SWITCH_HOME_LED"
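/*
 * Sketch: hint values are strings, so the floating point brightness form
 * described above is passed as text, e.g.
 *
 *     SDL_SetHint(SDL_HINT_JOYSTICK_HIDAPI_SWITCH_HOME_LED, "0.5");  // half brightness
 */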
/**
* \brief A variable controlling whether the Home button LED should be turned on when a Nintendo Switch Joy-Con controller is opened
*
* This variable can be set to the following values:
* "0" - home button LED is turned off
* "1" - home button LED is turned on
*
* By default the Home button LED state is not changed. This hint can also be set to a floating point value between 0.0 and 1.0 which controls the brightness of the Home button LED.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_JOYCON_HOME_LED "SDL_JOYSTICK_HIDAPI_JOYCON_HOME_LED"
/**
* \brief A variable controlling whether the player LEDs should be lit to indicate which player is associated with a Nintendo Switch controller.
*
* This variable can be set to the following values:
* "0" - player LEDs are not enabled
* "1" - player LEDs are enabled (the default)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_SWITCH_PLAYER_LED "SDL_JOYSTICK_HIDAPI_SWITCH_PLAYER_LED"
/**
* \brief A variable controlling whether the HIDAPI driver for Nintendo Wii and Wii U controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* This driver doesn't work with the dolphinbar, so the default is SDL_FALSE for now.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_WII "SDL_JOYSTICK_HIDAPI_WII"
/**
* \brief A variable controlling whether the player LEDs should be lit to indicate which player is associated with a Wii controller.
*
* This variable can be set to the following values:
* "0" - player LEDs are not enabled
* "1" - player LEDs are enabled (the default)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_WII_PLAYER_LED "SDL_JOYSTICK_HIDAPI_WII_PLAYER_LED"
/**
* \brief A variable controlling whether the HIDAPI driver for XBox controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is "0" on Windows, otherwise the value of SDL_HINT_JOYSTICK_HIDAPI
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX "SDL_JOYSTICK_HIDAPI_XBOX"
/**
* \brief A variable controlling whether the HIDAPI driver for XBox 360 controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI_XBOX
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX_360 "SDL_JOYSTICK_HIDAPI_XBOX_360"
/**
* \brief A variable controlling whether the player LEDs should be lit to indicate which player is associated with an Xbox 360 controller.
*
* This variable can be set to the following values:
* "0" - player LEDs are not enabled
* "1" - player LEDs are enabled (the default)
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX_360_PLAYER_LED "SDL_JOYSTICK_HIDAPI_XBOX_360_PLAYER_LED"
/**
* \brief A variable controlling whether the HIDAPI driver for XBox 360 wireless controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI_XBOX_360
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX_360_WIRELESS "SDL_JOYSTICK_HIDAPI_XBOX_360_WIRELESS"
/**
* \brief A variable controlling whether the HIDAPI driver for XBox One controllers should be used.
*
* This variable can be set to the following values:
* "0" - HIDAPI driver is not used
* "1" - HIDAPI driver is used
*
* The default is the value of SDL_HINT_JOYSTICK_HIDAPI_XBOX
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX_ONE "SDL_JOYSTICK_HIDAPI_XBOX_ONE"
/**
* \brief A variable controlling whether the Home button LED should be turned on when an Xbox One controller is opened
*
* This variable can be set to the following values:
* "0" - home button LED is turned off
* "1" - home button LED is turned on
*
* By default the Home button LED state is not changed. This hint can also be set to a floating point value between 0.0 and 1.0 which controls the brightness of the Home button LED. The default brightness is 0.4.
*/
#define SDL_HINT_JOYSTICK_HIDAPI_XBOX_ONE_HOME_LED "SDL_JOYSTICK_HIDAPI_XBOX_ONE_HOME_LED"
/**
* \brief A variable controlling whether the RAWINPUT joystick drivers should be used for better handling XInput-capable devices.
*
* This variable can be set to the following values:
* "0" - RAWINPUT drivers are not used
* "1" - RAWINPUT drivers are used (the default)
*/
#define SDL_HINT_JOYSTICK_RAWINPUT "SDL_JOYSTICK_RAWINPUT"
/**
* \brief A variable controlling whether the RAWINPUT driver should pull correlated data from XInput.
*
* This variable can be set to the following values:
* "0" - RAWINPUT driver will only use data from raw input APIs
* "1" - RAWINPUT driver will also pull data from XInput, providing
* better trigger axes, guide button presses, and rumble support
* for Xbox controllers
*
* The default is "1". This hint applies to any joysticks opened after setting the hint.
*/
#define SDL_HINT_JOYSTICK_RAWINPUT_CORRELATE_XINPUT "SDL_JOYSTICK_RAWINPUT_CORRELATE_XINPUT"
/**
* \brief A variable controlling whether the ROG Chakram mice should show up as joysticks
*
* This variable can be set to the following values:
* "0" - ROG Chakram mice do not show up as joysticks (the default)
* "1" - ROG Chakram mice show up as joysticks
*/
#define SDL_HINT_JOYSTICK_ROG_CHAKRAM "SDL_JOYSTICK_ROG_CHAKRAM"
/**
* \brief A variable controlling whether a separate thread should be used
* for handling joystick detection and raw input messages on Windows
*
* This variable can be set to the following values:
* "0" - A separate thread is not used (the default)
* "1" - A separate thread is used for handling raw input messages
*
*/
#define SDL_HINT_JOYSTICK_THREAD "SDL_JOYSTICK_THREAD"
/**
* \brief Determines whether SDL enforces that DRM master is required in order
* to initialize the KMSDRM video backend.
*
* The DRM subsystem has a concept of a "DRM master" which is a DRM client that
* has the ability to set planes, set cursor, etc. When SDL is DRM master, it
* can draw to the screen using the SDL rendering APIs. Without DRM master, SDL
* is still able to process input and query attributes of attached displays,
* but it cannot change display state or draw to the screen directly.
*
* In some cases, it can be useful to have the KMSDRM backend even if it cannot
* be used for rendering. An app may want to use SDL for input processing while
* using another rendering API (such as an MMAL overlay on Raspberry Pi) or
* using its own code to render to DRM overlays that SDL doesn't support.
*
* This hint must be set before initializing the video subsystem.
*
* This variable can be set to the following values:
* "0" - SDL will allow usage of the KMSDRM backend without DRM master
* "1" - SDL Will require DRM master to use the KMSDRM backend (default)
*/
#define SDL_HINT_KMSDRM_REQUIRE_DRM_MASTER "SDL_KMSDRM_REQUIRE_DRM_MASTER"
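/*
 * Minimal sketch of the input-only use case described above (the external
 * rendering path is hypothetical and omitted):
 *
 *     #include <SDL.h>
 *
 *     // Allow the KMSDRM backend without DRM master; this must happen
 *     // before the video subsystem is initialized.
 *     SDL_SetHint(SDL_HINT_KMSDRM_REQUIRE_DRM_MASTER, "0");
 *     if (SDL_Init(SDL_INIT_VIDEO) == 0) {
 *         // ... process SDL input events while another API draws ...
 *         SDL_Quit();
 *     }
 */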
/**
* \brief A comma separated list of devices to open as joysticks
*
* This variable is currently only used by the Linux joystick driver.
*/
#define SDL_HINT_JOYSTICK_DEVICE "SDL_JOYSTICK_DEVICE"
/**
* \brief A variable controlling whether joysticks on Linux will always treat 'hat' axis inputs (ABS_HAT0X - ABS_HAT3Y) as 8-way digital hats without checking whether they may be analog.
*
* This variable can be set to the following values:
* "0" - Only map hat axis inputs to digital hat outputs if the input axes appear to actually be digital (the default)
* "1" - Always handle the input axes numbered ABS_HAT0X to ABS_HAT3Y as digital hats
*/
#define SDL_HINT_LINUX_DIGITAL_HATS "SDL_LINUX_DIGITAL_HATS"
/**
* \brief A variable controlling whether digital hats on Linux will apply deadzones to their underlying input axes or use unfiltered values.
*
* This variable can be set to the following values:
* "0" - Return digital hat values based on unfiltered input axis values
* "1" - Return digital hat values with deadzones on the input axes taken into account (the default)
*/
#define SDL_HINT_LINUX_HAT_DEADZONES "SDL_LINUX_HAT_DEADZONES"
/**
* \brief A variable controlling whether to use the classic /dev/input/js* joystick interface or the newer /dev/input/event* joystick interface on Linux
*
* This variable can be set to the following values:
* "0" - Use /dev/input/event*
* "1" - Use /dev/input/js*
*
* By default the /dev/input/event* interfaces are used
*/
#define SDL_HINT_LINUX_JOYSTICK_CLASSIC "SDL_LINUX_JOYSTICK_CLASSIC"
/**
* \brief A variable controlling whether joysticks on Linux adhere to their HID-defined deadzones or return unfiltered values.
*
* This variable can be set to the following values:
* "0" - Return unfiltered joystick axis values (the default)
* "1" - Return axis values with deadzones taken into account
*/
#define SDL_HINT_LINUX_JOYSTICK_DEADZONES "SDL_LINUX_JOYSTICK_DEADZONES"
/**
 * \brief When set, don't force the SDL app to become a foreground process
*
* This hint only applies to Mac OS X.
*
*/
#define SDL_HINT_MAC_BACKGROUND_APP "SDL_MAC_BACKGROUND_APP"
/**
* \brief A variable that determines whether ctrl+click should generate a right-click event on Mac
*
 * If present, holding Ctrl while left-clicking will generate a right-click
 * event on Mac.
*/
#define SDL_HINT_MAC_CTRL_CLICK_EMULATE_RIGHT_CLICK "SDL_MAC_CTRL_CLICK_EMULATE_RIGHT_CLICK"
/**
* \brief A variable controlling whether dispatching OpenGL context updates should block the dispatching thread until the main thread finishes processing
*
* This variable can be set to the following values:
* "0" - Dispatching OpenGL context updates will block the dispatching thread until the main thread finishes processing (default).
* "1" - Dispatching OpenGL context updates will allow the dispatching thread to continue execution.
*
* Generally you want the default, but if you have OpenGL code in a background thread on a Mac, and the main thread
* hangs because it's waiting for that background thread, but that background thread is also hanging because it's
* waiting for the main thread to do an update, this might fix your issue.
*
* This hint only applies to macOS.
*
* This hint is available since SDL 2.24.0.
*
*/
#define SDL_HINT_MAC_OPENGL_ASYNC_DISPATCH "SDL_MAC_OPENGL_ASYNC_DISPATCH"
/**
* \brief A variable setting the double click radius, in pixels.
*/
#define SDL_HINT_MOUSE_DOUBLE_CLICK_RADIUS "SDL_MOUSE_DOUBLE_CLICK_RADIUS"
/**
* \brief A variable setting the double click time, in milliseconds.
*/
#define SDL_HINT_MOUSE_DOUBLE_CLICK_TIME "SDL_MOUSE_DOUBLE_CLICK_TIME"
/**
* \brief Allow mouse click events when clicking to focus an SDL window
*
* This variable can be set to the following values:
* "0" - Ignore mouse clicks that activate a window
* "1" - Generate events for mouse clicks that activate a window
*
* By default SDL will ignore mouse clicks that activate a window
*/
#define SDL_HINT_MOUSE_FOCUS_CLICKTHROUGH "SDL_MOUSE_FOCUS_CLICKTHROUGH"
/**
* \brief A variable setting the speed scale for mouse motion, in floating point, when the mouse is not in relative mode
*/
#define SDL_HINT_MOUSE_NORMAL_SPEED_SCALE "SDL_MOUSE_NORMAL_SPEED_SCALE"
/**
* \brief A variable controlling whether relative mouse mode constrains the mouse to the center of the window
*
* This variable can be set to the following values:
* "0" - Relative mouse mode constrains the mouse to the window
* "1" - Relative mouse mode constrains the mouse to the center of the window
*
* Constraining to the center of the window works better for FPS games and when the
* application is running over RDP. Constraining to the whole window works better
* for 2D games and increases the chance that the mouse will be in the correct
* position when using high DPI mice.
*
* By default SDL will constrain the mouse to the center of the window
*/
#define SDL_HINT_MOUSE_RELATIVE_MODE_CENTER "SDL_MOUSE_RELATIVE_MODE_CENTER"
/**
* \brief A variable controlling whether relative mouse mode is implemented using mouse warping
*
* This variable can be set to the following values:
* "0" - Relative mouse mode uses raw input
* "1" - Relative mouse mode uses mouse warping
*
* By default SDL will use raw input for relative mouse mode
*/
#define SDL_HINT_MOUSE_RELATIVE_MODE_WARP "SDL_MOUSE_RELATIVE_MODE_WARP"
/**
* \brief A variable controlling whether relative mouse motion is affected by renderer scaling
*
* This variable can be set to the following values:
* "0" - Relative motion is unaffected by DPI or renderer's logical size
* "1" - Relative motion is scaled according to DPI scaling and logical size
*
* By default relative mouse deltas are affected by DPI and renderer scaling
*/
#define SDL_HINT_MOUSE_RELATIVE_SCALING "SDL_MOUSE_RELATIVE_SCALING"
/**
* \brief A variable setting the scale for mouse motion, in floating point, when the mouse is in relative mode
*/
#define SDL_HINT_MOUSE_RELATIVE_SPEED_SCALE "SDL_MOUSE_RELATIVE_SPEED_SCALE"
/**
* \brief A variable controlling whether the system mouse acceleration curve is used for relative mouse motion.
*
* This variable can be set to the following values:
* "0" - Relative mouse motion will be unscaled (the default)
* "1" - Relative mouse motion will be scaled using the system mouse acceleration curve.
*
* If SDL_HINT_MOUSE_RELATIVE_SPEED_SCALE is set, that will override the system speed scale.
*/
#define SDL_HINT_MOUSE_RELATIVE_SYSTEM_SCALE "SDL_MOUSE_RELATIVE_SYSTEM_SCALE"
/**
* \brief A variable controlling whether a motion event should be generated for mouse warping in relative mode.
*
* This variable can be set to the following values:
* "0" - Warping the mouse will not generate a motion event in relative mode
* "1" - Warping the mouse will generate a motion event in relative mode
*
* By default warping the mouse will not generate motion events in relative mode. This avoids the application having to filter out large relative motion due to warping.
*/
#define SDL_HINT_MOUSE_RELATIVE_WARP_MOTION "SDL_MOUSE_RELATIVE_WARP_MOTION"
/**
* \brief A variable controlling whether mouse events should generate synthetic touch events
*
* This variable can be set to the following values:
* "0" - Mouse events will not generate touch events (default for desktop platforms)
* "1" - Mouse events will generate touch events (default for mobile platforms, such as Android and iOS)
*/
#define SDL_HINT_MOUSE_TOUCH_EVENTS "SDL_MOUSE_TOUCH_EVENTS"
/**
* \brief A variable controlling whether the mouse is captured while mouse buttons are pressed
*
* This variable can be set to the following values:
* "0" - The mouse is not captured while mouse buttons are pressed
* "1" - The mouse is captured while mouse buttons are pressed
*
* By default the mouse is captured while mouse buttons are pressed so if the mouse is dragged
* outside the window, the application continues to receive mouse events until the button is
* released.
*/
#define SDL_HINT_MOUSE_AUTO_CAPTURE "SDL_MOUSE_AUTO_CAPTURE"
/**
* \brief Tell SDL not to catch the SIGINT or SIGTERM signals.
*
 * This hint only applies to Unix-like platforms, and should be set before
 * any calls to SDL_Init().
*
* The variable can be set to the following values:
* "0" - SDL will install a SIGINT and SIGTERM handler, and when it
* catches a signal, convert it into an SDL_QUIT event.
* "1" - SDL will not install a signal handler at all.
*/
#define SDL_HINT_NO_SIGNAL_HANDLERS "SDL_NO_SIGNAL_HANDLERS"
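/*
 * Sketch, assuming the application installs its own handler (on_sigint and
 * quit_requested are illustrative names, not SDL API):
 *
 *     #include <signal.h>
 *     #include <SDL.h>
 *
 *     static volatile sig_atomic_t quit_requested = 0;
 *     static void on_sigint(int sig) { (void)sig; quit_requested = 1; }
 *
 *     SDL_SetHint(SDL_HINT_NO_SIGNAL_HANDLERS, "1");  // before SDL_Init()
 *     SDL_Init(SDL_INIT_VIDEO);
 *     signal(SIGINT, on_sigint);  // SDL will not override this handler
 */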
/**
* \brief A variable controlling what driver to use for OpenGL ES contexts.
*
* On some platforms, currently Windows and X11, OpenGL drivers may support
* creating contexts with an OpenGL ES profile. By default SDL uses these
 * profiles when available; otherwise it attempts to load an OpenGL ES
* library, e.g. that provided by the ANGLE project. This variable controls
* whether SDL follows this default behaviour or will always load an
* OpenGL ES library.
*
* Circumstances where this is useful include
* - Testing an app with a particular OpenGL ES implementation, e.g ANGLE,
* or emulator, e.g. those from ARM, Imagination or Qualcomm.
* - Resolving OpenGL ES function addresses at link time by linking with
* the OpenGL ES library instead of querying them at run time with
* SDL_GL_GetProcAddress().
*
* Caution: for an application to work with the default behaviour across
* different OpenGL drivers it must query the OpenGL ES function
* addresses at run time using SDL_GL_GetProcAddress().
*
* This variable is ignored on most platforms because OpenGL ES is native
* or not supported.
*
* This variable can be set to the following values:
* "0" - Use ES profile of OpenGL, if available. (Default when not set.)
* "1" - Load OpenGL ES library using the default library names.
*
*/
#define SDL_HINT_OPENGL_ES_DRIVER "SDL_OPENGL_ES_DRIVER"
/**
* \brief A variable controlling which orientations are allowed on iOS/Android.
*
* In some circumstances it is necessary to be able to explicitly control
* which UI orientations are allowed.
*
* This variable is a space delimited list of the following values:
 *    "LandscapeLeft", "LandscapeRight", "Portrait", "PortraitUpsideDown"
*/
#define SDL_HINT_ORIENTATIONS "SDL_IOS_ORIENTATIONS"
/**
* \brief A variable controlling the use of a sentinel event when polling the event queue
*
* This variable can be set to the following values:
* "0" - Disable poll sentinels
* "1" - Enable poll sentinels
*
* When polling for events, SDL_PumpEvents is used to gather new events from devices.
* If a device keeps producing new events between calls to SDL_PumpEvents, a poll loop will
* become stuck until the new events stop.
* This is most noticeable when moving a high frequency mouse.
*
* By default, poll sentinels are enabled.
*/
#define SDL_HINT_POLL_SENTINEL "SDL_POLL_SENTINEL"
/**
* \brief Override for SDL_GetPreferredLocales()
*
* If set, this will be favored over anything the OS might report for the
* user's preferred locales. Changing this hint at runtime will not generate
 * an SDL_LOCALECHANGED event (but if you can change the hint, you can push
* your own event, if you want).
*
* The format of this hint is a comma-separated list of language and locale,
* combined with an underscore, as is a common format: "en_GB". Locale is
* optional: "en". So you might have a list like this: "en_GB,jp,es_PT"
*/
#define SDL_HINT_PREFERRED_LOCALES "SDL_PREFERRED_LOCALES"
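/*
 * Sketch of overriding and then reading the locale list. SDL_GetPreferredLocales()
 * (available since SDL 2.0.14) returns an array terminated by an entry whose
 * language is NULL; the array must be freed with SDL_free():
 *
 *     SDL_SetHint(SDL_HINT_PREFERRED_LOCALES, "en_GB,jp,es_PT");
 *     SDL_Locale *locales = SDL_GetPreferredLocales();
 *     if (locales) {
 *         for (SDL_Locale *l = locales; l->language; ++l) {
 *             SDL_Log("locale: %s %s", l->language, l->country ? l->country : "");
 *         }
 *         SDL_free(locales);
 *     }
 */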
/**
* \brief A variable describing the content orientation on QtWayland-based platforms.
*
* On QtWayland platforms, windows are rotated client-side to allow for custom
* transitions. In order to correctly position overlays (e.g. volume bar) and
* gestures (e.g. events view, close/minimize gestures), the system needs to
* know in which orientation the application is currently drawing its contents.
*
* This does not cause the window to be rotated or resized, the application
* needs to take care of drawing the content in the right orientation (the
* framebuffer is always in portrait mode).
*
* This variable can be one of the following values:
* "primary" (default), "portrait", "landscape", "inverted-portrait", "inverted-landscape"
*/
#define SDL_HINT_QTWAYLAND_CONTENT_ORIENTATION "SDL_QTWAYLAND_CONTENT_ORIENTATION"
/**
* \brief Flags to set on QtWayland windows to integrate with the native window manager.
*
* On QtWayland platforms, this hint controls the flags to set on the windows.
* For example, on Sailfish OS "OverridesSystemGestures" disables swipe gestures.
*
* This variable is a space-separated list of the following values (empty = no flags):
* "OverridesSystemGestures", "StaysOnTop", "BypassWindowManager"
*/
#define SDL_HINT_QTWAYLAND_WINDOW_FLAGS "SDL_QTWAYLAND_WINDOW_FLAGS"
/**
* \brief A variable controlling whether the 2D render API is compatible or efficient.
*
* This variable can be set to the following values:
*
* "0" - Don't use batching to make rendering more efficient.
* "1" - Use batching, but might cause problems if app makes its own direct OpenGL calls.
*
* Up to SDL 2.0.9, the render API would draw immediately when requested. Now
* it batches up draw requests and sends them all to the GPU only when forced
* to (during SDL_RenderPresent, when changing render targets, by updating a
* texture that the batch needs, etc). This is significantly more efficient,
* but it can cause problems for apps that expect to render on top of the
* render API's output. As such, SDL will disable batching if a specific
* render backend is requested (since this might indicate that the app is
* planning to use the underlying graphics API directly). This hint can
* be used to explicitly request batching in this instance. It is a contract
* that you will either never use the underlying graphics API directly, or
 * if you do, you will call SDL_RenderFlush() before you do, so that any
 * current batch goes to the GPU before your work begins. Not following this
 * contract will result in undefined behavior.
*/
#define SDL_HINT_RENDER_BATCHING "SDL_RENDER_BATCHING"
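/*
 * Sketch of the contract described above, for an app that mixes the render
 * API with its own GL calls (renderer is assumed to exist; my_draw_overlay()
 * is hypothetical):
 *
 *     SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1");
 *     // ... SDL_RenderCopy() / SDL_RenderDrawLine() calls ...
 *     SDL_RenderFlush(renderer);   // push the pending batch to the GPU
 *     my_draw_overlay();           // direct OpenGL work is now safe
 *     SDL_RenderPresent(renderer);
 */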
/**
* \brief A variable controlling how the 2D render API renders lines
*
* This variable can be set to the following values:
* "0" - Use the default line drawing method (Bresenham's line algorithm as of SDL 2.0.20)
* "1" - Use the driver point API using Bresenham's line algorithm (correct, draws many points)
* "2" - Use the driver line API (occasionally misses line endpoints based on hardware driver quirks, was the default before 2.0.20)
* "3" - Use the driver geometry API (correct, draws thicker diagonal lines)
*
* This variable should be set when the renderer is created.
*/
#define SDL_HINT_RENDER_LINE_METHOD "SDL_RENDER_LINE_METHOD"
/**
* \brief A variable controlling whether to enable Direct3D 11+'s Debug Layer.
*
* This variable does not have any effect on the Direct3D 9 based renderer.
*
* This variable can be set to the following values:
* "0" - Disable Debug Layer use
* "1" - Enable Debug Layer use
*
* By default, SDL does not use Direct3D Debug Layer.
*/
#define SDL_HINT_RENDER_DIRECT3D11_DEBUG "SDL_RENDER_DIRECT3D11_DEBUG"
/**
* \brief A variable controlling whether the Direct3D device is initialized for thread-safe operations.
*
* This variable can be set to the following values:
* "0" - Thread-safety is not enabled (faster)
* "1" - Thread-safety is enabled
*
* By default the Direct3D device is created with thread-safety disabled.
*/
#define SDL_HINT_RENDER_DIRECT3D_THREADSAFE "SDL_RENDER_DIRECT3D_THREADSAFE"
/**
* \brief A variable specifying which render driver to use.
*
* If the application doesn't pick a specific renderer to use, this variable
* specifies the name of the preferred renderer. If the preferred renderer
* can't be initialized, the normal default renderer is used.
*
* This variable is case insensitive and can be set to the following values:
* "direct3d"
* "direct3d11"
* "direct3d12"
* "opengl"
* "opengles2"
* "opengles"
* "metal"
* "software"
*
* The default varies by platform, but it's the first one in the list that
* is available on the current platform.
*/
#define SDL_HINT_RENDER_DRIVER "SDL_RENDER_DRIVER"
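/*
 * Sketch: expressing a renderer preference before creating the renderer
 * (window is assumed to exist; SDL falls back to the normal default if
 * "opengl" can't be initialized):
 *
 *     SDL_SetHint(SDL_HINT_RENDER_DRIVER, "opengl");
 *     SDL_Renderer *renderer = SDL_CreateRenderer(window, -1, 0);
 */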
/**
* \brief A variable controlling the scaling policy for SDL_RenderSetLogicalSize.
*
* This variable can be set to the following values:
* "0" or "letterbox" - Uses letterbox/sidebars to fit the entire rendering on screen
* "1" or "overscan" - Will zoom the rendering so it fills the entire screen, allowing edges to be drawn offscreen
*
* By default letterbox is used
*/
#define SDL_HINT_RENDER_LOGICAL_SIZE_MODE "SDL_RENDER_LOGICAL_SIZE_MODE"
/**
* \brief A variable controlling whether the OpenGL render driver uses shaders if they are available.
*
* This variable can be set to the following values:
* "0" - Disable shaders
* "1" - Enable shaders
*
* By default shaders are used if OpenGL supports them.
*/
#define SDL_HINT_RENDER_OPENGL_SHADERS "SDL_RENDER_OPENGL_SHADERS"
/**
* \brief A variable controlling the scaling quality
*
* This variable can be set to the following values:
* "0" or "nearest" - Nearest pixel sampling
* "1" or "linear" - Linear filtering (supported by OpenGL and Direct3D)
* "2" or "best" - Currently this is the same as "linear"
*
* By default nearest pixel sampling is used
*/
#define SDL_HINT_RENDER_SCALE_QUALITY "SDL_RENDER_SCALE_QUALITY"
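/*
 * Sketch: this hint is checked when a texture is created, so set it before
 * texture creation (renderer and surface are assumed to exist):
 *
 *     SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
 *     SDL_Texture *tex = SDL_CreateTextureFromSurface(renderer, surface);
 */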
/**
* \brief A variable controlling whether updates to the SDL screen surface should be synchronized with the vertical refresh, to avoid tearing.
*
* This variable can be set to the following values:
* "0" - Disable vsync
* "1" - Enable vsync
*
* By default SDL does not sync screen surface updates with vertical refresh.
*/
#define SDL_HINT_RENDER_VSYNC "SDL_RENDER_VSYNC"
/**
 * \brief A variable controlling whether VSYNC is automatically disabled if the renderer cannot reach the target frame rate
 *
 * This variable can be set to the following values:
 *    "0" - VSYNC is used as defined by the main flag. (default)
 *    "1" - If VSYNC was previously enabled, it is disabled when the renderer cannot keep up
 *
 * By default SDL does not enable automatic VSYNC adjustment
*/
#define SDL_HINT_PS2_DYNAMIC_VSYNC "SDL_PS2_DYNAMIC_VSYNC"
/**
* \brief A variable to control whether the return key on the soft keyboard
* should hide the soft keyboard on Android and iOS.
*
* The variable can be set to the following values:
* "0" - The return key will be handled as a key event. This is the behaviour of SDL <= 2.0.3. (default)
* "1" - The return key will hide the keyboard.
*
* The value of this hint is used at runtime, so it can be changed at any time.
*/
#define SDL_HINT_RETURN_KEY_HIDES_IME "SDL_RETURN_KEY_HIDES_IME"
/**
 * \brief Tell SDL which Dispmanx layer to use on a Raspberry Pi
*
* Also known as Z-order. The variable can take a negative or positive value.
* The default is 10000.
*/
#define SDL_HINT_RPI_VIDEO_LAYER "SDL_RPI_VIDEO_LAYER"
/**
* \brief Specify an "activity name" for screensaver inhibition.
*
* Some platforms, notably Linux desktops, list the applications which are
* inhibiting the screensaver or other power-saving features.
*
* This hint lets you specify the "activity name" sent to the OS when
* SDL_DisableScreenSaver() is used (or the screensaver is automatically
* disabled). The contents of this hint are used when the screensaver is
* disabled. You should use a string that describes what your program is doing
* (and, therefore, why the screensaver is disabled). For example, "Playing a
* game" or "Watching a video".
*
* Setting this to "" or leaving it unset will have SDL use a reasonable
* default: "Playing a game" or something similar.
*
* On targets where this is not supported, this hint does nothing.
*/
#define SDL_HINT_SCREENSAVER_INHIBIT_ACTIVITY_NAME "SDL_SCREENSAVER_INHIBIT_ACTIVITY_NAME"
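/*
 * Sketch: pairing the activity name with screensaver inhibition:
 *
 *     SDL_SetHint(SDL_HINT_SCREENSAVER_INHIBIT_ACTIVITY_NAME, "Watching a video");
 *     SDL_DisableScreenSaver();
 */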
/**
* \brief Specifies whether SDL_THREAD_PRIORITY_TIME_CRITICAL should be treated as realtime.
*
* On some platforms, like Linux, a realtime priority thread may be subject to restrictions
* that require special handling by the application. This hint exists to let SDL know that
* the app is prepared to handle said restrictions.
*
* On Linux, SDL will apply the following configuration to any thread that becomes realtime:
 *   * The SCHED_RESET_ON_FORK bit will be set on the scheduling policy.
 *   * An RLIMIT_RTTIME budget will be configured to the rtkit-specified limit.
 *     Exceeding this limit will result in the kernel sending SIGKILL to the app;
 *     refer to the man pages for more information.
*
* This variable can be set to the following values:
* "0" - default platform specific behaviour
* "1" - Force SDL_THREAD_PRIORITY_TIME_CRITICAL to a realtime scheduling policy
*/
#define SDL_HINT_THREAD_FORCE_REALTIME_TIME_CRITICAL "SDL_THREAD_FORCE_REALTIME_TIME_CRITICAL"
/**
* \brief A string specifying additional information to use with SDL_SetThreadPriority.
*
* By default SDL_SetThreadPriority will make appropriate system changes in order to
* apply a thread priority. For example on systems using pthreads the scheduler policy
* is changed automatically to a policy that works well with a given priority.
* Code which has specific requirements can override SDL's default behavior with this hint.
*
* pthread hint values are "current", "other", "fifo" and "rr".
* Currently no other platform hint values are defined but may be in the future.
*
* \note On Linux, the kernel may send SIGKILL to realtime tasks which exceed the distro
* configured execution budget for rtkit. This budget can be queried through RLIMIT_RTTIME
* after calling SDL_SetThreadPriority().
*/
#define SDL_HINT_THREAD_PRIORITY_POLICY "SDL_THREAD_PRIORITY_POLICY"
/**
 * \brief A string specifying SDL's thread stack size in bytes, or "0" for the backend's default size
*
* Use this hint in case you need to set SDL's threads stack size to other than the default.
 * This is especially useful if you build SDL against a non-glibc libc library (such as musl) which
 * provides a relatively small default thread stack size (a few kilobytes versus the default 8 MB glibc uses).
 * Support for this hint is currently available only in the pthread, Windows, and PSP backends.
*
* Instead of this hint, in 2.0.9 and later, you can use
* SDL_CreateThreadWithStackSize(). This hint only works with the classic
* SDL_CreateThread().
*/
#define SDL_HINT_THREAD_STACK_SIZE "SDL_THREAD_STACK_SIZE"
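/*
 * Sketch, assuming a thread function my_worker() (hypothetical): request a
 * 512 KiB stack for threads created with the classic API:
 *
 *     static int my_worker(void *data) { (void)data; return 0; }
 *
 *     SDL_SetHint(SDL_HINT_THREAD_STACK_SIZE, "524288");
 *     SDL_Thread *t = SDL_CreateThread(my_worker, "worker", NULL);
 *     SDL_WaitThread(t, NULL);
 *
 * On 2.0.9 and later, SDL_CreateThreadWithStackSize() expresses the same
 * thing without a process-wide hint.
 */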
/**
* \brief A variable that controls the timer resolution, in milliseconds.
*
* The higher resolution the timer, the more frequently the CPU services
* timer interrupts, and the more precise delays are, but this takes up
* power and CPU time. This hint is only used on Windows.
*
* See this blog post for more information:
* http://randomascii.wordpress.com/2013/07/08/windows-timer-resolution-megawatts-wasted/
*
* If this variable is set to "0", the system timer resolution is not set.
*
* The default value is "1". This hint may be set at any time.
*/
#define SDL_HINT_TIMER_RESOLUTION "SDL_TIMER_RESOLUTION"
/**
* \brief A variable controlling whether touch events should generate synthetic mouse events
*
* This variable can be set to the following values:
* "0" - Touch events will not generate mouse events
* "1" - Touch events will generate mouse events
*
* By default SDL will generate mouse events for touch events
*/
#define SDL_HINT_TOUCH_MOUSE_EVENTS "SDL_TOUCH_MOUSE_EVENTS"
/**
* \brief A variable controlling which touchpad should generate synthetic mouse events
*
* This variable can be set to the following values:
* "0" - Only front touchpad should generate mouse events. Default
* "1" - Only back touchpad should generate mouse events.
* "2" - Both touchpads should generate mouse events.
*
 * By default only the front touchpad generates mouse events.
*/
#define SDL_HINT_VITA_TOUCH_MOUSE_DEVICE "SDL_HINT_VITA_TOUCH_MOUSE_DEVICE"
/**
* \brief A variable controlling whether the Android / tvOS remotes
* should be listed as joystick devices, instead of sending keyboard events.
*
* This variable can be set to the following values:
* "0" - Remotes send enter/escape/arrow key events
* "1" - Remotes are available as 2 axis, 2 button joysticks (the default).
*/
#define SDL_HINT_TV_REMOTE_AS_JOYSTICK "SDL_TV_REMOTE_AS_JOYSTICK"
/**
* \brief A variable controlling whether the screensaver is enabled.
*
* This variable can be set to the following values:
* "0" - Disable screensaver
* "1" - Enable screensaver
*
* By default SDL will disable the screensaver.
*/
#define SDL_HINT_VIDEO_ALLOW_SCREENSAVER "SDL_VIDEO_ALLOW_SCREENSAVER"
/**
* \brief Tell the video driver that we only want a double buffer.
*
 * By default, most low-level 2D APIs will use a triple buffer scheme that
* wastes no CPU time on waiting for vsync after issuing a flip, but
* introduces a frame of latency. On the other hand, using a double buffer
* scheme instead is recommended for cases where low latency is an important
* factor because we save a whole frame of latency.
* We do so by waiting for vsync immediately after issuing a flip, usually just
 * after the eglSwapBuffers call in the backend's *_SwapWindow function.
*
* Since it's driver-specific, it's only supported where possible and
 * implemented. It is currently supported by the following drivers:
*
* - KMSDRM (kmsdrm)
* - Raspberry Pi (raspberrypi)
*/
#define SDL_HINT_VIDEO_DOUBLE_BUFFER "SDL_VIDEO_DOUBLE_BUFFER"
/**
* \brief A variable controlling whether the EGL window is allowed to be
* composited as transparent, rather than opaque.
*
* Most window systems will always render windows opaque, even if the surface
* format has an alpha channel. This is not always true, however, so by default
* SDL will try to enforce opaque composition. To override this behavior, you
* can set this hint to "1".
*/
#define SDL_HINT_VIDEO_EGL_ALLOW_TRANSPARENCY "SDL_VIDEO_EGL_ALLOW_TRANSPARENCY"
/**
* \brief A variable controlling whether the graphics context is externally managed.
*
* This variable can be set to the following values:
* "0" - SDL will manage graphics contexts that are attached to windows.
* "1" - Disable graphics context management on windows.
*
* By default SDL will manage OpenGL contexts in certain situations. For example, on Android the
* context will be automatically saved and restored when pausing the application. Additionally, some
* platforms will assume usage of OpenGL if Vulkan isn't used. Setting this to "1" will prevent this
 * behavior, which is desirable when the application manages the graphics context, such as
* an externally managed OpenGL context or attaching a Vulkan surface to the window.
*/
#define SDL_HINT_VIDEO_EXTERNAL_CONTEXT "SDL_VIDEO_EXTERNAL_CONTEXT"
/**
* \brief If set to 1, then do not allow high-DPI windows. ("Retina" on Mac and iOS)
*/
#define SDL_HINT_VIDEO_HIGHDPI_DISABLED "SDL_VIDEO_HIGHDPI_DISABLED"
/**
* \brief A variable that dictates policy for fullscreen Spaces on Mac OS X.
*
* This hint only applies to Mac OS X.
*
* The variable can be set to the following values:
* "0" - Disable Spaces support (FULLSCREEN_DESKTOP won't use them and
* SDL_WINDOW_RESIZABLE windows won't offer the "fullscreen"
* button on their titlebars).
* "1" - Enable Spaces support (FULLSCREEN_DESKTOP will use them and
* SDL_WINDOW_RESIZABLE windows will offer the "fullscreen"
* button on their titlebars).
*
* The default value is "1". This hint must be set before any windows are created.
*/
#define SDL_HINT_VIDEO_MAC_FULLSCREEN_SPACES "SDL_VIDEO_MAC_FULLSCREEN_SPACES"
/**
* \brief Minimize your SDL_Window if it loses key focus when in fullscreen mode. Defaults to false.
* \warning Before SDL 2.0.14, this defaulted to true! In 2.0.14, we're
* seeing if "true" causes more problems than it solves in modern times.
*
*/
#define SDL_HINT_VIDEO_MINIMIZE_ON_FOCUS_LOSS "SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS"
/**
* \brief A variable controlling whether the libdecor Wayland backend is allowed to be used.
*
* This variable can be set to the following values:
* "0" - libdecor use is disabled.
* "1" - libdecor use is enabled (default).
*
* libdecor is used over xdg-shell when xdg-decoration protocol is unavailable.
*/
#define SDL_HINT_VIDEO_WAYLAND_ALLOW_LIBDECOR "SDL_VIDEO_WAYLAND_ALLOW_LIBDECOR"
/**
 * \brief A variable controlling whether the libdecor Wayland backend is preferred over native decorations.
*
* When this hint is set, libdecor will be used to provide window decorations, even if xdg-decoration is
* available. (Note that, by default, libdecor will use xdg-decoration itself if available).
*
* This variable can be set to the following values:
* "0" - libdecor is enabled only if server-side decorations are unavailable.
* "1" - libdecor is always enabled if available.
*
* libdecor is used over xdg-shell when xdg-decoration protocol is unavailable.
*/
#define SDL_HINT_VIDEO_WAYLAND_PREFER_LIBDECOR "SDL_VIDEO_WAYLAND_PREFER_LIBDECOR"
/**
* \brief A variable controlling whether video mode emulation is enabled under Wayland.
*
* When this hint is set, a standard set of emulated CVT video modes will be exposed for use by the application.
* If it is disabled, the only modes exposed will be the logical desktop size and, in the case of a scaled
* desktop, the native display resolution.
*
* This variable can be set to the following values:
* "0" - Video mode emulation is disabled.
* "1" - Video mode emulation is enabled.
*
* By default video mode emulation is enabled.
*/
#define SDL_HINT_VIDEO_WAYLAND_MODE_EMULATION "SDL_VIDEO_WAYLAND_MODE_EMULATION"
/**
* \brief Enable or disable mouse pointer warp emulation, needed by some older games.
*
 * When this hint is set, SDL will emulate mouse warps using relative mouse mode.
* This is required for some older games (such as Source engine games), which warp the
* mouse to the centre of the screen rather than using relative mouse motion. Note that
* relative mouse mode may have different mouse acceleration behaviour than pointer warps.
*
* This variable can be set to the following values:
 *    "0" - All mouse warps fail, as mouse warping is not available under Wayland.
* "1" - Some mouse warps will be emulated by forcing relative mouse mode.
*
* If not set, this is automatically enabled unless an application uses relative mouse
* mode directly.
*/
#define SDL_HINT_VIDEO_WAYLAND_EMULATE_MOUSE_WARP "SDL_VIDEO_WAYLAND_EMULATE_MOUSE_WARP"
/**
* \brief A variable that is the address of another SDL_Window* (as a hex string formatted with "%p").
*
* If this hint is set before SDL_CreateWindowFrom() and the SDL_Window* it is set to has
* SDL_WINDOW_OPENGL set (and running on WGL only, currently), then two things will occur on the newly
* created SDL_Window:
*
* 1. Its pixel format will be set to the same pixel format as this SDL_Window. This is
* needed for example when sharing an OpenGL context across multiple windows.
*
* 2. The flag SDL_WINDOW_OPENGL will be set on the new window so it can be used for
* OpenGL rendering.
*
* This variable can be set to the following values:
* The address (as a string "%p") of the SDL_Window* that new windows created with SDL_CreateWindowFrom() should
* share a pixel format with.
*/
#define SDL_HINT_VIDEO_WINDOW_SHARE_PIXEL_FORMAT "SDL_VIDEO_WINDOW_SHARE_PIXEL_FORMAT"
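/*
 * Sketch of formatting the window pointer as this hint expects ("%p").
 * gl_window is an existing SDL_WINDOW_OPENGL window and native_handle is a
 * hypothetical foreign window handle:
 *
 *     char addr[64];
 *     SDL_snprintf(addr, sizeof(addr), "%p", (void *)gl_window);
 *     SDL_SetHint(SDL_HINT_VIDEO_WINDOW_SHARE_PIXEL_FORMAT, addr);
 *     SDL_Window *wrapped = SDL_CreateWindowFrom(native_handle);
 */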
/**
* \brief When calling SDL_CreateWindowFrom(), make the window compatible with OpenGL.
*
* This variable can be set to the following values:
* "0" - Don't add any graphics flags to the SDL_WindowFlags
* "1" - Add SDL_WINDOW_OPENGL to the SDL_WindowFlags
*
* By default SDL will not make the foreign window compatible with OpenGL.
*/
#define SDL_HINT_VIDEO_FOREIGN_WINDOW_OPENGL "SDL_VIDEO_FOREIGN_WINDOW_OPENGL"
/**
* \brief When calling SDL_CreateWindowFrom(), make the window compatible with Vulkan.
*
* This variable can be set to the following values:
* "0" - Don't add any graphics flags to the SDL_WindowFlags
* "1" - Add SDL_WINDOW_VULKAN to the SDL_WindowFlags
*
* By default SDL will not make the foreign window compatible with Vulkan.
*/
#define SDL_HINT_VIDEO_FOREIGN_WINDOW_VULKAN "SDL_VIDEO_FOREIGN_WINDOW_VULKAN"
/**
* \brief A variable specifying which shader compiler to preload when using the Chrome ANGLE binaries
*
* SDL has EGL and OpenGL ES2 support on Windows via the ANGLE project. It
* can use two different sets of binaries, those compiled by the user from source
 * or those provided by the Chrome browser. In the latter case, these binaries require
* that SDL loads a DLL providing the shader compiler.
*
* This variable can be set to the following values:
* "d3dcompiler_46.dll" - default, best for Vista or later.
* "d3dcompiler_43.dll" - for XP support.
* "none" - do not load any library, useful if you compiled ANGLE from source and included the compiler in your binaries.
*
*/
#define SDL_HINT_VIDEO_WIN_D3DCOMPILER "SDL_VIDEO_WIN_D3DCOMPILER"
/**
* \brief A variable controlling whether X11 should use GLX or EGL by default
*
* This variable can be set to the following values:
* "0" - Use GLX
* "1" - Use EGL
*
* By default SDL will use GLX when both are present.
*/
#define SDL_HINT_VIDEO_X11_FORCE_EGL "SDL_VIDEO_X11_FORCE_EGL"
/**
* \brief A variable controlling whether the X11 _NET_WM_BYPASS_COMPOSITOR hint should be used.
*
* This variable can be set to the following values:
* "0" - Disable _NET_WM_BYPASS_COMPOSITOR
* "1" - Enable _NET_WM_BYPASS_COMPOSITOR
*
* By default SDL will use _NET_WM_BYPASS_COMPOSITOR
*
*/
#define SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR "SDL_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR"
/**
* \brief A variable controlling whether the X11 _NET_WM_PING protocol should be supported.
*
* This variable can be set to the following values:
* "0" - Disable _NET_WM_PING
* "1" - Enable _NET_WM_PING
*
* By default SDL will use _NET_WM_PING, but for applications that know they
* will not always be able to respond to ping requests in a timely manner they can
* turn it off to avoid the window manager thinking the app is hung.
* The hint is checked in CreateWindow.
*/
#define SDL_HINT_VIDEO_X11_NET_WM_PING "SDL_VIDEO_X11_NET_WM_PING"
/**
* \brief A variable forcing the visual ID chosen for new X11 windows
*
*/
#define SDL_HINT_VIDEO_X11_WINDOW_VISUALID "SDL_VIDEO_X11_WINDOW_VISUALID"
/**
* \brief A no-longer-used variable controlling whether the X11 Xinerama extension should be used.
*
* Before SDL 2.0.24, this would let apps and users disable Xinerama support on X11.
* Now SDL never uses Xinerama, and does not check for this hint at all.
* The preprocessor define is left here for source compatibility.
*/
#define SDL_HINT_VIDEO_X11_XINERAMA "SDL_VIDEO_X11_XINERAMA"
/**
* \brief A variable controlling whether the X11 XRandR extension should be used.
*
* This variable can be set to the following values:
* "0" - Disable XRandR
* "1" - Enable XRandR
*
* By default SDL will use XRandR.
*/
#define SDL_HINT_VIDEO_X11_XRANDR "SDL_VIDEO_X11_XRANDR"
/**
* \brief A no-longer-used variable controlling whether the X11 VidMode extension should be used.
*
* Before SDL 2.0.24, this would let apps and users disable XVidMode support on X11.
* Now SDL never uses XVidMode, and does not check for this hint at all.
* The preprocessor define is left here for source compatibility.
*/
#define SDL_HINT_VIDEO_X11_XVIDMODE "SDL_VIDEO_X11_XVIDMODE"
/**
* \brief Controls how the fact chunk affects the loading of a WAVE file.
*
* The fact chunk stores information about the number of samples of a WAVE
* file. The Standards Update from Microsoft notes that this value can be used
* to 'determine the length of the data in seconds'. This is especially useful
* for compressed formats (for which this is a mandatory chunk) if they produce
* multiple sample frames per block and truncating the block is not allowed.
* The fact chunk can exactly specify how many sample frames there should be
* in this case.
*
 * Unfortunately, most applications seem to ignore the fact chunk and so SDL
* ignores it by default as well.
*
* This variable can be set to the following values:
*
* "truncate" - Use the number of samples to truncate the wave data if
* the fact chunk is present and valid
* "strict" - Like "truncate", but raise an error if the fact chunk
* is invalid, not present for non-PCM formats, or if the
* data chunk doesn't have that many samples
* "ignorezero" - Like "truncate", but ignore fact chunk if the number of
* samples is zero
* "ignore" - Ignore fact chunk entirely (default)
*/
#define SDL_HINT_WAVE_FACT_CHUNK "SDL_WAVE_FACT_CHUNK"
/**
* \brief Controls how the size of the RIFF chunk affects the loading of a WAVE file.
*
* The size of the RIFF chunk (which includes all the sub-chunks of the WAVE
* file) is not always reliable. In case the size is wrong, it's possible to
* just ignore it and step through the chunks until a fixed limit is reached.
*
* Note that files that have trailing data unrelated to the WAVE file or
* corrupt files may slow down the loading process without a reliable boundary.
* By default, SDL stops after 10000 chunks to prevent wasting time. Use the
* environment variable SDL_WAVE_CHUNK_LIMIT to adjust this value.
*
* This variable can be set to the following values:
*
* "force" - Always use the RIFF chunk size as a boundary for the chunk search
* "ignorezero" - Like "force", but a zero size searches up to 4 GiB (default)
* "ignore" - Ignore the RIFF chunk size and always search up to 4 GiB
* "maximum" - Search for chunks until the end of file (not recommended)
*/
#define SDL_HINT_WAVE_RIFF_CHUNK_SIZE "SDL_WAVE_RIFF_CHUNK_SIZE"
/**
* \brief Controls how a truncated WAVE file is handled.
*
* A WAVE file is considered truncated if any of the chunks are incomplete or
* the data chunk size is not a multiple of the block size. By default, SDL
* decodes until the first incomplete block, as most applications seem to do.
*
* This variable can be set to the following values:
*
* "verystrict" - Raise an error if the file is truncated
* "strict" - Like "verystrict", but the size of the RIFF chunk is ignored
* "dropframe" - Decode until the first incomplete sample frame
* "dropblock" - Decode until the first incomplete block (default)
*/
#define SDL_HINT_WAVE_TRUNCATION "SDL_WAVE_TRUNCATION"
/**
* \brief Tell SDL not to name threads on Windows with the 0x406D1388 Exception.
* The 0x406D1388 Exception is a trick used to inform Visual Studio of a
* thread's name, but it tends to cause problems with other debuggers,
* and the .NET runtime. Note that SDL 2.0.6 and later will still use
* the (safer) SetThreadDescription API, introduced in the Windows 10
* Creators Update, if available.
*
* The variable can be set to the following values:
* "0" - SDL will raise the 0x406D1388 Exception to name threads.
* This is the default behavior of SDL <= 2.0.4.
* "1" - SDL will not raise this exception, and threads will be unnamed. (default)
* This is necessary with .NET languages or debuggers that aren't Visual Studio.
*/
#define SDL_HINT_WINDOWS_DISABLE_THREAD_NAMING "SDL_WINDOWS_DISABLE_THREAD_NAMING"
/**
* \brief A variable controlling whether the windows message loop is processed by SDL
*
* This variable can be set to the following values:
* "0" - The window message loop is not run
* "1" - The window message loop is processed in SDL_PumpEvents()
*
* By default SDL will process the windows message loop
*/
#define SDL_HINT_WINDOWS_ENABLE_MESSAGELOOP "SDL_WINDOWS_ENABLE_MESSAGELOOP"
/**
* \brief Force SDL to use Critical Sections for mutexes on Windows.
* On Windows 7 and newer, Slim Reader/Writer Locks are available.
 * They offer better performance, allocate no kernel resources and
* use less memory. SDL will fall back to Critical Sections on older
* OS versions or if forced to by this hint.
*
* This variable can be set to the following values:
* "0" - Use SRW Locks when available. If not, fall back to Critical Sections. (default)
* "1" - Force the use of Critical Sections in all cases.
*
*/
#define SDL_HINT_WINDOWS_FORCE_MUTEX_CRITICAL_SECTIONS "SDL_WINDOWS_FORCE_MUTEX_CRITICAL_SECTIONS"
/**
* \brief Force SDL to use Kernel Semaphores on Windows.
* Kernel Semaphores are inter-process and require a context
* switch on every interaction. On Windows 8 and newer, the
* WaitOnAddress API is available. Using that and atomics to
* implement semaphores increases performance.
* SDL will fall back to Kernel Objects on older OS versions
* or if forced to by this hint.
*
* This variable can be set to the following values:
* "0" - Use Atomics and WaitOnAddress API when available. If not, fall back to Kernel Objects. (default)
* "1" - Force the use of Kernel Objects in all cases.
*
*/
#define SDL_HINT_WINDOWS_FORCE_SEMAPHORE_KERNEL "SDL_WINDOWS_FORCE_SEMAPHORE_KERNEL"
/**
 * \brief A variable to specify a custom icon resource ID from an RC file on the Windows platform
*/
#define SDL_HINT_WINDOWS_INTRESOURCE_ICON "SDL_WINDOWS_INTRESOURCE_ICON"
#define SDL_HINT_WINDOWS_INTRESOURCE_ICON_SMALL "SDL_WINDOWS_INTRESOURCE_ICON_SMALL"
/**
* \brief Tell SDL not to generate window-close events for Alt+F4 on Windows.
*
* The variable can be set to the following values:
* "0" - SDL will generate a window-close event when it sees Alt+F4.
* "1" - SDL will only do normal key handling for Alt+F4.
*/
#define SDL_HINT_WINDOWS_NO_CLOSE_ON_ALT_F4 "SDL_WINDOWS_NO_CLOSE_ON_ALT_F4"
/**
* \brief Use the D3D9Ex API introduced in Windows Vista, instead of normal D3D9.
* Direct3D 9Ex contains changes to state management that can eliminate device
* loss errors during scenarios like Alt+Tab or UAC prompts. D3D9Ex may require
* some changes to your application to cope with the new behavior, so this
* is disabled by default.
*
* This hint must be set before initializing the video subsystem.
*
* For more information on Direct3D 9Ex, see:
* - https://docs.microsoft.com/en-us/windows/win32/direct3darticles/graphics-apis-in-windows-vista#direct3d-9ex
* - https://docs.microsoft.com/en-us/windows/win32/direct3darticles/direct3d-9ex-improvements
*
* This variable can be set to the following values:
* "0" - Use the original Direct3D 9 API (default)
* "1" - Use the Direct3D 9Ex API on Vista and later (and fall back if D3D9Ex is unavailable)
*
*/
#define SDL_HINT_WINDOWS_USE_D3D9EX "SDL_WINDOWS_USE_D3D9EX"
/**
* \brief Controls whether SDL will declare the process to be DPI aware.
*
* This hint must be set before initializing the video subsystem.
*
* The main purpose of declaring DPI awareness is to disable OS bitmap scaling of SDL windows on monitors with
* a DPI scale factor.
*
* This hint is equivalent to requesting DPI awareness via external means (e.g. calling SetProcessDpiAwarenessContext)
* and does not cause SDL to use a virtualized coordinate system, so it will generally give you 1 SDL coordinate = 1 pixel
* even on high-DPI displays.
*
* For more information, see:
* https://docs.microsoft.com/en-us/windows/win32/hidpi/high-dpi-desktop-application-development-on-windows
*
* This variable can be set to the following values:
* "" - Do not change the DPI awareness (default).
* "unaware" - Declare the process as DPI unaware. (Windows 8.1 and later).
* "system" - Request system DPI awareness. (Vista and later).
* "permonitor" - Request per-monitor DPI awareness. (Windows 8.1 and later).
* "permonitorv2" - Request per-monitor V2 DPI awareness. (Windows 10, version 1607 and later).
 *      The most visible difference from "permonitor" is that the window title bar will be scaled
* to the visually correct size when dragging between monitors with different scale factors.
* This is the preferred DPI awareness level.
*
* If the requested DPI awareness is not available on the currently running OS, SDL will try to request the best
* available match.
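 *
 * A minimal usage sketch (set before SDL_Init(SDL_INIT_VIDEO)):
 *
 * ```c
 * SDL_SetHint(SDL_HINT_WINDOWS_DPI_AWARENESS, "permonitorv2");
 * ```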
*/
#define SDL_HINT_WINDOWS_DPI_AWARENESS "SDL_WINDOWS_DPI_AWARENESS"
/**
* \brief Uses DPI-scaled points as the SDL coordinate system on Windows.
*
* This changes the SDL coordinate system units to be DPI-scaled points, rather than pixels everywhere.
* This means windows will be appropriately sized, even when created on high-DPI displays with scaling.
*
* e.g. requesting a 640x480 window from SDL, on a display with 125% scaling in Windows display settings,
* will create a window with an 800x600 client area (in pixels).
*
* Setting this to "1" implicitly requests process DPI awareness (setting SDL_WINDOWS_DPI_AWARENESS is unnecessary),
* and forces SDL_WINDOW_ALLOW_HIGHDPI on all windows.
*
* This variable can be set to the following values:
* "0" - SDL coordinates equal Windows coordinates. No automatic window resizing when dragging
* between monitors with different scale factors (unless this is performed by
* Windows itself, which is the case when the process is DPI unaware).
* "1" - SDL coordinates are in DPI-scaled points. Automatically resize windows as needed on
* displays with non-100% scale factors.
*/
#define SDL_HINT_WINDOWS_DPI_SCALING "SDL_WINDOWS_DPI_SCALING"
/**
* \brief A variable controlling whether the window frame and title bar are interactive when the cursor is hidden
*
* This variable can be set to the following values:
* "0" - The window frame is not interactive when the cursor is hidden (no move, resize, etc)
* "1" - The window frame is interactive when the cursor is hidden
*
* By default SDL will allow interaction with the window frame when the cursor is hidden
*/
#define SDL_HINT_WINDOW_FRAME_USABLE_WHILE_CURSOR_HIDDEN "SDL_WINDOW_FRAME_USABLE_WHILE_CURSOR_HIDDEN"
/**
* \brief A variable controlling whether the window is activated when the SDL_ShowWindow function is called
*
* This variable can be set to the following values:
* "0" - The window is activated when the SDL_ShowWindow function is called
* "1" - The window is not activated when the SDL_ShowWindow function is called
*
* By default SDL will activate the window when the SDL_ShowWindow function is called
*/
#define SDL_HINT_WINDOW_NO_ACTIVATION_WHEN_SHOWN "SDL_WINDOW_NO_ACTIVATION_WHEN_SHOWN"
/** \brief Allows back-button-press events on Windows Phone to be marked as handled
*
* Windows Phone devices typically feature a Back button. When pressed,
* the OS will emit back-button-press events, which apps are expected to
* handle in an appropriate manner. If apps do not explicitly mark these
* events as 'Handled', then the OS will invoke its default behavior for
* unhandled back-button-press events, which on Windows Phone 8 and 8.1 is to
* terminate the app (and attempt to switch to the previous app, or to the
* device's home screen).
*
* Setting the SDL_HINT_WINRT_HANDLE_BACK_BUTTON hint to "1" will cause SDL
* to mark back-button-press events as Handled, if and when one is sent to
* the app.
*
* Internally, Windows Phone sends back button events as parameters to
* special back-button-press callback functions. Apps that need to respond
* to back-button-press events are expected to register one or more
* callback functions for such, shortly after being launched (during the
* app's initialization phase). After the back button is pressed, the OS
* will invoke these callbacks. If the app's callback(s) do not explicitly
* mark the event as handled by the time they return, or if the app never
 *  registers one of these callbacks, the OS will consider the event
 *  unhandled, and it will apply its default back button behavior (terminate
* the app).
*
* SDL registers its own back-button-press callback with the Windows Phone
* OS. This callback will emit a pair of SDL key-press events (SDL_KEYDOWN
* and SDL_KEYUP), each with a scancode of SDL_SCANCODE_AC_BACK, after which
* it will check the contents of the hint, SDL_HINT_WINRT_HANDLE_BACK_BUTTON.
* If the hint's value is set to "1", the back button event's Handled
* property will get set to 'true'. If the hint's value is set to something
* else, or if it is unset, SDL will leave the event's Handled property
 *  alone. (Note that, by default, the OS sets this property to 'false'.)
*
* SDL apps can either set SDL_HINT_WINRT_HANDLE_BACK_BUTTON well before a
* back button is pressed, or can set it in direct-response to a back button
* being pressed.
*
* In order to get notified when a back button is pressed, SDL apps should
* register a callback function with SDL_AddEventWatch(), and have it listen
* for SDL_KEYDOWN events that have a scancode of SDL_SCANCODE_AC_BACK.
* (Alternatively, SDL_KEYUP events can be listened-for. Listening for
* either event type is suitable.) Any value of SDL_HINT_WINRT_HANDLE_BACK_BUTTON
* set by such a callback, will be applied to the OS' current
* back-button-press event.
*
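 *  A minimal sketch of such an event watch (BackButtonWatch is an
 *  illustrative name):
 *
 *  ```c
 *  static int SDLCALL BackButtonWatch(void *userdata, SDL_Event *event)
 *  {
 *      if (event->type == SDL_KEYDOWN &&
 *          event->key.keysym.scancode == SDL_SCANCODE_AC_BACK) {
 *          SDL_SetHint(SDL_HINT_WINRT_HANDLE_BACK_BUTTON, "1");
 *      }
 *      return 0;  // return value of event watches is ignored
 *  }
 *  // ... during init: SDL_AddEventWatch(BackButtonWatch, NULL);
 *  ```
 *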
* More details on back button behavior in Windows Phone apps can be found
* at the following page, on Microsoft's developer site:
* http://msdn.microsoft.com/en-us/library/windowsphone/develop/jj247550(v=vs.105).aspx
*/
#define SDL_HINT_WINRT_HANDLE_BACK_BUTTON "SDL_WINRT_HANDLE_BACK_BUTTON"
/** \brief Label text for a WinRT app's privacy policy link
*
* Network-enabled WinRT apps must include a privacy policy. On Windows 8, 8.1, and RT,
* Microsoft mandates that this policy be available via the Windows Settings charm.
* SDL provides code to add a link there, with its label text being set via the
* optional hint, SDL_HINT_WINRT_PRIVACY_POLICY_LABEL.
*
* Please note that a privacy policy's contents are not set via this hint. A separate
* hint, SDL_HINT_WINRT_PRIVACY_POLICY_URL, is used to link to the actual text of the
* policy.
*
* The contents of this hint should be encoded as a UTF8 string.
*
* The default value is "Privacy Policy". This hint should only be set during app
* initialization, preferably before any calls to SDL_Init().
*
* For additional information on linking to a privacy policy, see the documentation for
* SDL_HINT_WINRT_PRIVACY_POLICY_URL.
*/
#define SDL_HINT_WINRT_PRIVACY_POLICY_LABEL "SDL_WINRT_PRIVACY_POLICY_LABEL"
/**
* \brief A URL to a WinRT app's privacy policy
*
 *  All network-enabled WinRT apps must make a privacy policy available to their
 *  users. On Windows 8, 8.1, and RT, Microsoft mandates that this policy be
 *  available in the Windows Settings charm, as accessed from within the app.
* SDL provides code to add a URL-based link there, which can point to the app's
* privacy policy.
*
 *  To set up a URL to an app's privacy policy, set SDL_HINT_WINRT_PRIVACY_POLICY_URL
* before calling any SDL_Init() functions. The contents of the hint should
* be a valid URL. For example, "http://www.example.com".
*
* The default value is "", which will prevent SDL from adding a privacy policy
* link to the Settings charm. This hint should only be set during app init.
*
* The label text of an app's "Privacy Policy" link may be customized via another
* hint, SDL_HINT_WINRT_PRIVACY_POLICY_LABEL.
*
* Please note that on Windows Phone, Microsoft does not provide standard UI
* for displaying a privacy policy link, and as such, SDL_HINT_WINRT_PRIVACY_POLICY_URL
* will not get used on that platform. Network-enabled phone apps should display
* their privacy policy through some other, in-app means.
*/
#define SDL_HINT_WINRT_PRIVACY_POLICY_URL "SDL_WINRT_PRIVACY_POLICY_URL"
/**
* \brief Mark X11 windows as override-redirect.
*
* If set, this _might_ increase framerate at the expense of the desktop
* not working as expected. Override-redirect windows aren't noticed by the
* window manager at all.
*
* You should probably only use this for fullscreen windows, and you probably
* shouldn't even use it for that. But it's here if you want to try!
*/
#define SDL_HINT_X11_FORCE_OVERRIDE_REDIRECT "SDL_X11_FORCE_OVERRIDE_REDIRECT"
/**
* \brief A variable that lets you disable the detection and use of Xinput gamepad devices
*
* The variable can be set to the following values:
* "0" - Disable XInput detection (only uses direct input)
* "1" - Enable XInput detection (the default)
*/
#define SDL_HINT_XINPUT_ENABLED "SDL_XINPUT_ENABLED"
/**
* \brief A variable that lets you disable the detection and use of DirectInput gamepad devices
*
* The variable can be set to the following values:
* "0" - Disable DirectInput detection (only uses XInput)
* "1" - Enable DirectInput detection (the default)
*/
#define SDL_HINT_DIRECTINPUT_ENABLED "SDL_DIRECTINPUT_ENABLED"
/**
* \brief A variable that causes SDL to use the old axis and button mapping for XInput devices.
*
* This hint is for backwards compatibility only and will be removed in SDL 2.1
*
* The default value is "0". This hint must be set before SDL_Init()
*/
#define SDL_HINT_XINPUT_USE_OLD_JOYSTICK_MAPPING "SDL_XINPUT_USE_OLD_JOYSTICK_MAPPING"
/**
* \brief A variable that causes SDL to not ignore audio "monitors"
*
* This is currently only used for PulseAudio and ignored elsewhere.
*
* By default, SDL ignores audio devices that aren't associated with physical
* hardware. Changing this hint to "1" will expose anything SDL sees that
* appears to be an audio source or sink. This will add "devices" to the list
* that the user probably doesn't want or need, but it can be useful in
* scenarios where you want to hook up SDL to some sort of virtual device,
* etc.
*
* The default value is "0". This hint must be set before SDL_Init().
*
* This hint is available since SDL 2.0.16. Before then, virtual devices are
* always ignored.
*/
#define SDL_HINT_AUDIO_INCLUDE_MONITORS "SDL_AUDIO_INCLUDE_MONITORS"
/**
* \brief A variable that forces X11 windows to create as a custom type.
*
* This is currently only used for X11 and ignored elsewhere.
*
* During SDL_CreateWindow, SDL uses the _NET_WM_WINDOW_TYPE X11 property
* to report to the window manager the type of window it wants to create.
* This might be set to various things if SDL_WINDOW_TOOLTIP or
* SDL_WINDOW_POPUP_MENU, etc, were specified. For "normal" windows that
* haven't set a specific type, this hint can be used to specify a custom
* type. For example, a dock window might set this to
* "_NET_WM_WINDOW_TYPE_DOCK".
*
* If not set or set to "", this hint is ignored. This hint must be set
* before the SDL_CreateWindow() call that it is intended to affect.
*
* This hint is available since SDL 2.0.22.
*/
#define SDL_HINT_X11_WINDOW_TYPE "SDL_X11_WINDOW_TYPE"
/**
* \brief A variable that decides whether to send SDL_QUIT when closing the final window.
*
* By default, SDL sends an SDL_QUIT event when there is only one window
* and it receives an SDL_WINDOWEVENT_CLOSE event, under the assumption most
* apps would also take the loss of this window as a signal to terminate the
* program.
*
* However, it's not unreasonable in some cases to have the program continue
* to live on, perhaps to create new windows later.
*
* Changing this hint to "0" will cause SDL to not send an SDL_QUIT event
* when the final window is requesting to close. Note that in this case,
* there are still other legitimate reasons one might get an SDL_QUIT
* event: choosing "Quit" from the macOS menu bar, sending a SIGINT (ctrl-c)
* on Unix, etc.
*
* The default value is "1". This hint can be changed at any time.
*
* This hint is available since SDL 2.0.22. Before then, you always get
* an SDL_QUIT event when closing the final window.
*/
#define SDL_HINT_QUIT_ON_LAST_WINDOW_CLOSE "SDL_QUIT_ON_LAST_WINDOW_CLOSE"
/**
* \brief A variable that decides what video backend to use.
*
* By default, SDL will try all available video backends in a reasonable
* order until it finds one that can work, but this hint allows the app
* or user to force a specific target, such as "x11" if, say, you are
* on Wayland but want to try talking to the X server instead.
*
* This functionality has existed since SDL 2.0.0 (indeed, before that)
* but before 2.0.22 this was an environment variable only. In 2.0.22,
* it was upgraded to a full SDL hint, so you can set the environment
 *  variable as usual or programmatically set the hint with SDL_SetHint,
* which won't propagate to child processes.
*
* The default value is unset, in which case SDL will try to figure out
* the best video backend on your behalf. This hint needs to be set
* before SDL_Init() is called to be useful.
*
* This hint is available since SDL 2.0.22. Before then, you could set
* the environment variable to get the same effect.
*/
#define SDL_HINT_VIDEODRIVER "SDL_VIDEODRIVER"
/**
* \brief A variable that decides what audio backend to use.
*
* By default, SDL will try all available audio backends in a reasonable
* order until it finds one that can work, but this hint allows the app
* or user to force a specific target, such as "alsa" if, say, you are
* on PulseAudio but want to try talking to the lower level instead.
*
* This functionality has existed since SDL 2.0.0 (indeed, before that)
* but before 2.0.22 this was an environment variable only. In 2.0.22,
* it was upgraded to a full SDL hint, so you can set the environment
 *  variable as usual or programmatically set the hint with SDL_SetHint,
* which won't propagate to child processes.
*
* The default value is unset, in which case SDL will try to figure out
* the best audio backend on your behalf. This hint needs to be set
* before SDL_Init() is called to be useful.
*
* This hint is available since SDL 2.0.22. Before then, you could set
* the environment variable to get the same effect.
*/
#define SDL_HINT_AUDIODRIVER "SDL_AUDIODRIVER"
/**
* \brief A variable that decides what KMSDRM device to use.
*
* Internally, SDL might open something like "/dev/dri/cardNN" to
* access KMSDRM functionality, where "NN" is a device index number.
*
* SDL makes a guess at the best index to use (usually zero), but the
* app or user can set this hint to a number between 0 and 99 to
* force selection.
*
* This hint is available since SDL 2.24.0.
*/
#define SDL_HINT_KMSDRM_DEVICE_INDEX "SDL_KMSDRM_DEVICE_INDEX"
/**
* \brief A variable that treats trackpads as touch devices.
*
* On macOS (and possibly other platforms in the future), SDL will report
* touches on a trackpad as mouse input, which is generally what users
* expect from this device; however, these are often actually full
* multitouch-capable touch devices, so it might be preferable to some apps
* to treat them as such.
*
* Setting this hint to true will make the trackpad input report as a
* multitouch device instead of a mouse. The default is false.
*
* Note that most platforms don't support this hint. As of 2.24.0, it
* only supports MacBooks' trackpads on macOS. Others may follow later.
*
 *  This hint is checked during SDL_Init and cannot be changed afterward.
*
* This hint is available since SDL 2.24.0.
*/
#define SDL_HINT_TRACKPAD_IS_TOUCH_ONLY "SDL_TRACKPAD_IS_TOUCH_ONLY"
/**
* \brief An enumeration of hint priorities
*/
typedef enum
{
SDL_HINT_DEFAULT,
SDL_HINT_NORMAL,
SDL_HINT_OVERRIDE
} SDL_HintPriority;
/**
* Set a hint with a specific priority.
*
* The priority controls the behavior when setting a hint that already has a
* value. Hints will replace existing hints of their priority and lower.
* Environment variables are considered to have override priority.
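 *
 * A minimal sketch, forcing a value that wins over any earlier
 * SDL_SetHint() call:
 *
 * ```c
 * SDL_SetHintWithPriority(SDL_HINT_VIDEODRIVER, "x11", SDL_HINT_OVERRIDE);
 * ```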
*
* \param name the hint to set
* \param value the value of the hint variable
* \param priority the SDL_HintPriority level for the hint
* \returns SDL_TRUE if the hint was set, SDL_FALSE otherwise.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetHint
* \sa SDL_SetHint
*/
extern DECLSPEC SDL_bool SDLCALL SDL_SetHintWithPriority(const char *name,
const char *value,
SDL_HintPriority priority);
/**
* Set a hint with normal priority.
*
* Hints will not be set if there is an existing override hint or environment
* variable that takes precedence. You can use SDL_SetHintWithPriority() to
* set the hint with override priority instead.
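 *
 * A minimal sketch (for init-time hints, call this before SDL_Init()):
 *
 * ```c
 * SDL_SetHint(SDL_HINT_AUDIODRIVER, "alsa");
 * ```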
*
* \param name the hint to set
* \param value the value of the hint variable
* \returns SDL_TRUE if the hint was set, SDL_FALSE otherwise.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetHint
* \sa SDL_SetHintWithPriority
*/
extern DECLSPEC SDL_bool SDLCALL SDL_SetHint(const char *name,
const char *value);
/**
* Reset a hint to the default value.
*
* This will reset a hint to the value of the environment variable, or NULL if
* the environment isn't set. Callbacks will be called normally with this
* change.
*
 * \param name the hint to reset
 * \returns SDL_TRUE if the hint was reset, SDL_FALSE otherwise.
*
* \since This function is available since SDL 2.24.0.
*
* \sa SDL_GetHint
* \sa SDL_SetHint
*/
extern DECLSPEC SDL_bool SDLCALL SDL_ResetHint(const char *name);
/**
* Reset all hints to the default values.
*
* This will reset all hints to the value of the associated environment
* variable, or NULL if the environment isn't set. Callbacks will be called
* normally with this change.
*
* \since This function is available since SDL 2.26.0.
*
* \sa SDL_GetHint
* \sa SDL_SetHint
* \sa SDL_ResetHint
*/
extern DECLSPEC void SDLCALL SDL_ResetHints(void);
/**
* Get the value of a hint.
*
* \param name the hint to query
* \returns the string value of a hint or NULL if the hint isn't set.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_SetHint
* \sa SDL_SetHintWithPriority
*/
extern DECLSPEC const char * SDLCALL SDL_GetHint(const char *name);
/**
* Get the boolean value of a hint variable.
*
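 * A minimal sketch:
 *
 * ```c
 * if (SDL_GetHintBoolean(SDL_HINT_XINPUT_ENABLED, SDL_TRUE)) {
 *     // XInput detection is enabled (or the hint is unset)
 * }
 * ```
 *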
* \param name the name of the hint to get the boolean value from
* \param default_value the value to return if the hint does not exist
* \returns the boolean value of a hint or the provided default value if the
* hint does not exist.
*
* \since This function is available since SDL 2.0.5.
*
* \sa SDL_GetHint
* \sa SDL_SetHint
*/
extern DECLSPEC SDL_bool SDLCALL SDL_GetHintBoolean(const char *name, SDL_bool default_value);
/**
* Type definition of the hint callback function.
*
* \param userdata what was passed as `userdata` to SDL_AddHintCallback()
* \param name what was passed as `name` to SDL_AddHintCallback()
* \param oldValue the previous hint value
 * \param newValue the new value the hint is to be set to
*/
typedef void (SDLCALL *SDL_HintCallback)(void *userdata, const char *name, const char *oldValue, const char *newValue);
/**
* Add a function to watch a particular hint.
*
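 * A minimal sketch using the SDL_HintCallback signature defined above
 * (MyHintWatcher is an illustrative name):
 *
 * ```c
 * static void SDLCALL MyHintWatcher(void *userdata, const char *name,
 *                                   const char *oldValue, const char *newValue)
 * {
 *     SDL_Log("%s: '%s' -> '%s'", name,
 *             oldValue ? oldValue : "(unset)",
 *             newValue ? newValue : "(unset)");
 * }
 * // ... SDL_AddHintCallback(SDL_HINT_VIDEODRIVER, MyHintWatcher, NULL);
 * ```
 *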
* \param name the hint to watch
* \param callback An SDL_HintCallback function that will be called when the
* hint value changes
* \param userdata a pointer to pass to the callback function
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_DelHintCallback
*/
extern DECLSPEC void SDLCALL SDL_AddHintCallback(const char *name,
SDL_HintCallback callback,
void *userdata);
/**
* Remove a function watching a particular hint.
*
* \param name the hint being watched
* \param callback An SDL_HintCallback function that will be called when the
* hint value changes
* \param userdata a pointer being passed to the callback function
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_AddHintCallback
*/
extern DECLSPEC void SDLCALL SDL_DelHintCallback(const char *name,
SDL_HintCallback callback,
void *userdata);
/**
* Clear all hints.
*
* This function is automatically called during SDL_Quit(), and deletes all
* callbacks without calling them and frees all memory associated with hints.
* If you're calling this from application code you probably want to call
* SDL_ResetHints() instead.
*
* This function will be removed from the API the next time we rev the ABI.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_ResetHints
*/
extern DECLSPEC void SDLCALL SDL_ClearHints(void);
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_hints_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_vulkan.h | /*
Simple DirectMedia Layer
Copyright (C) 2017, Mark Callow
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_vulkan.h
*
 *  Header file for functions for creating Vulkan surfaces on SDL windows.
*/
#ifndef SDL_vulkan_h_
#define SDL_vulkan_h_
#include "SDL_video.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/* Avoid including vulkan.h, don't define VkInstance if it's already included */
#ifdef VULKAN_H_
#define NO_SDL_VULKAN_TYPEDEFS
#endif
#ifndef NO_SDL_VULKAN_TYPEDEFS
#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
#else
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
#endif
VK_DEFINE_HANDLE(VkInstance)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
#endif /* !NO_SDL_VULKAN_TYPEDEFS */
typedef VkInstance SDL_vulkanInstance;
typedef VkSurfaceKHR SDL_vulkanSurface; /* for compatibility with Tizen */
/**
* \name Vulkan support functions
*
 *  \note The SDL_Vulkan_GetInstanceExtensions & SDL_Vulkan_CreateSurface APIs
 *        are compatible with Tizen's implementation of Vulkan in SDL.
*/
/* @{ */
/**
* Dynamically load the Vulkan loader library.
*
* This should be called after initializing the video driver, but before
* creating any Vulkan windows. If no Vulkan loader library is loaded, the
* default library will be loaded upon creation of the first Vulkan window.
*
* It is fairly common for Vulkan applications to link with libvulkan instead
* of explicitly loading it at run time. This will work with SDL provided the
* application links to a dynamic library and both it and SDL use the same
* search path.
*
* If you specify a non-NULL `path`, an application should retrieve all of the
* Vulkan functions it uses from the dynamic library using
* SDL_Vulkan_GetVkGetInstanceProcAddr unless you can guarantee `path` points
* to the same vulkan loader library the application linked to.
*
* On Apple devices, if `path` is NULL, SDL will attempt to find the
* `vkGetInstanceProcAddr` address within all the Mach-O images of the current
* process. This is because it is fairly common for Vulkan applications to
* link with libvulkan (and historically MoltenVK was provided as a static
* library). If it is not found, on macOS, SDL will attempt to load
* `vulkan.framework/vulkan`, `libvulkan.1.dylib`,
* `MoltenVK.framework/MoltenVK`, and `libMoltenVK.dylib`, in that order. On
* iOS, SDL will attempt to load `libMoltenVK.dylib`. Applications using a
* dynamic framework or .dylib must ensure it is included in its application
* bundle.
*
* On non-Apple devices, application linking with a static libvulkan is not
* supported. Either do not link to the Vulkan loader or link to a dynamic
* library version.
*
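 * A minimal sketch:
 *
 * ```c
 * SDL_Init(SDL_INIT_VIDEO);
 * if (SDL_Vulkan_LoadLibrary(NULL) != 0) {
 *     SDL_Log("Vulkan loader unavailable: %s", SDL_GetError());
 * }
 * ```
 *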
* \param path The platform dependent Vulkan loader library name or NULL
* \returns 0 on success or -1 if the library couldn't be loaded; call
* SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.6.
*
 * \sa SDL_Vulkan_GetVkGetInstanceProcAddr
* \sa SDL_Vulkan_UnloadLibrary
*/
extern DECLSPEC int SDLCALL SDL_Vulkan_LoadLibrary(const char *path);
/**
* Get the address of the `vkGetInstanceProcAddr` function.
*
* This should be called after either calling SDL_Vulkan_LoadLibrary() or
* creating an SDL_Window with the `SDL_WINDOW_VULKAN` flag.
*
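 * A minimal sketch (PFN_vkGetInstanceProcAddr comes from vulkan.h):
 *
 * ```c
 * PFN_vkGetInstanceProcAddr getProcAddr =
 *     (PFN_vkGetInstanceProcAddr) SDL_Vulkan_GetVkGetInstanceProcAddr();
 * ```
 *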
* \returns the function pointer for `vkGetInstanceProcAddr` or NULL on error.
*
* \since This function is available since SDL 2.0.6.
*/
extern DECLSPEC void *SDLCALL SDL_Vulkan_GetVkGetInstanceProcAddr(void);
/**
 * Unload the Vulkan library previously loaded by SDL_Vulkan_LoadLibrary().
*
* \since This function is available since SDL 2.0.6.
*
* \sa SDL_Vulkan_LoadLibrary
*/
extern DECLSPEC void SDLCALL SDL_Vulkan_UnloadLibrary(void);
/**
* Get the names of the Vulkan instance extensions needed to create a surface
* with SDL_Vulkan_CreateSurface.
*
* If `pNames` is NULL, then the number of required Vulkan instance extensions
* is returned in `pCount`. Otherwise, `pCount` must point to a variable set
* to the number of elements in the `pNames` array, and on return the variable
 * is overwritten with the number of names actually written to `pNames`. If
 * `pCount` is less than the number of required extensions, at most `pCount`
 * names will be written, and SDL_FALSE will be returned instead of SDL_TRUE
 * to indicate that not all the required extensions were returned.
*
 * As of SDL 2.0.8, the `window` parameter still needs to be valid; however,
 * it will likely be removed in future releases.
*
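 * A minimal sketch of the typical two-call pattern (error checks omitted):
 *
 * ```c
 * unsigned int count = 0;
 * SDL_Vulkan_GetInstanceExtensions(window, &count, NULL);
 * const char **names = SDL_malloc(count * sizeof (const char *));
 * SDL_Vulkan_GetInstanceExtensions(window, &count, names);
 * // ... pass count/names to VkInstanceCreateInfo, then SDL_free(names)
 * ```
 *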
* \param window A window for which the required Vulkan instance extensions
* should be retrieved (will be deprecated in a future release)
* \param pCount A pointer to an unsigned int corresponding to the number of
* extensions to be returned
* \param pNames NULL or a pointer to an array to be filled with required
* Vulkan instance extensions
* \returns SDL_TRUE on success, SDL_FALSE on error.
*
* \since This function is available since SDL 2.0.6.
*
* \sa SDL_Vulkan_CreateSurface
*/
extern DECLSPEC SDL_bool SDLCALL SDL_Vulkan_GetInstanceExtensions(SDL_Window *window,
unsigned int *pCount,
const char **pNames);
/**
* Create a Vulkan rendering surface for a window.
*
* The `window` must have been created with the `SDL_WINDOW_VULKAN` flag and
* `instance` must have been created with extensions returned by
* SDL_Vulkan_GetInstanceExtensions() enabled.
*
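 * A minimal sketch:
 *
 * ```c
 * VkSurfaceKHR surface;
 * if (!SDL_Vulkan_CreateSurface(window, instance, &surface)) {
 *     SDL_Log("failed to create Vulkan surface: %s", SDL_GetError());
 * }
 * ```
 *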
* \param window The window to which to attach the Vulkan surface
* \param instance The Vulkan instance handle
* \param surface A pointer to a VkSurfaceKHR handle to output the newly
* created surface
* \returns SDL_TRUE on success, SDL_FALSE on error.
*
* \since This function is available since SDL 2.0.6.
*
* \sa SDL_Vulkan_GetInstanceExtensions
* \sa SDL_Vulkan_GetDrawableSize
*/
extern DECLSPEC SDL_bool SDLCALL SDL_Vulkan_CreateSurface(SDL_Window *window,
VkInstance instance,
VkSurfaceKHR* surface);
/**
* Get the size of the window's underlying drawable dimensions in pixels.
*
* This may differ from SDL_GetWindowSize() if we're rendering to a high-DPI
* drawable, i.e. the window was created with `SDL_WINDOW_ALLOW_HIGHDPI` on a
* platform with high-DPI support (Apple calls this "Retina"), and not
* disabled by the `SDL_HINT_VIDEO_HIGHDPI_DISABLED` hint.
*
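 * A minimal sketch:
 *
 * ```c
 * int w = 0, h = 0;
 * SDL_Vulkan_GetDrawableSize(window, &w, &h);
 * // use w and h for the swapchain image extent
 * ```
 *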
* \param window an SDL_Window for which the size is to be queried
* \param w Pointer to the variable to write the width to or NULL
* \param h Pointer to the variable to write the height to or NULL
*
* \since This function is available since SDL 2.0.6.
*
* \sa SDL_GetWindowSize
* \sa SDL_CreateWindow
* \sa SDL_Vulkan_CreateSurface
*/
extern DECLSPEC void SDLCALL SDL_Vulkan_GetDrawableSize(SDL_Window * window,
int *w, int *h);
/* @} *//* Vulkan support functions */
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_vulkan_h_ */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_types.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_types.h
*
* \deprecated
*/
/* DEPRECATED */
#include "SDL_stdinc.h"
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_config_windows.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2017 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef SDL_config_windows_h_
#define SDL_config_windows_h_
#define SDL_config_h_
#include "SDL_platform.h"
/* This is a set of defines to configure the SDL features */
#if !defined(_STDINT_H_) && (!defined(HAVE_STDINT_H) || !_HAVE_STDINT_H)
#if defined(__GNUC__) || defined(__DMC__) || defined(__WATCOMC__)
#define HAVE_STDINT_H 1
#elif defined(_MSC_VER)
typedef signed __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef signed __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef signed __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#ifndef _UINTPTR_T_DEFINED
#ifdef _WIN64
typedef unsigned __int64 uintptr_t;
#else
typedef unsigned int uintptr_t;
#endif
#define _UINTPTR_T_DEFINED
#endif
/* Older Visual C++ headers don't have the Win64-compatible typedefs... */
#if ((_MSC_VER <= 1200) && (!defined(DWORD_PTR)))
#define DWORD_PTR DWORD
#endif
#if ((_MSC_VER <= 1200) && (!defined(LONG_PTR)))
#define LONG_PTR LONG
#endif
#else /* !__GNUC__ && !_MSC_VER */
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#ifndef _SIZE_T_DEFINED_
#define _SIZE_T_DEFINED_
typedef unsigned int size_t;
#endif
typedef unsigned int uintptr_t;
#endif /* __GNUC__ || _MSC_VER */
#endif /* !_STDINT_H_ && !HAVE_STDINT_H */
#ifdef _WIN64
# define SIZEOF_VOIDP 8
#else
# define SIZEOF_VOIDP 4
#endif
#define HAVE_DDRAW_H 1
#define HAVE_DINPUT_H 1
#define HAVE_DSOUND_H 1
#define HAVE_DXGI_H 1
#define HAVE_XINPUT_H 1
/* This is disabled by default to avoid C runtime dependencies and manifest requirements */
#ifdef HAVE_LIBC
/* Useful headers */
#define HAVE_STDIO_H 1
#define STDC_HEADERS 1
#define HAVE_STRING_H 1
#define HAVE_CTYPE_H 1
#define HAVE_MATH_H 1
#define HAVE_SIGNAL_H 1
/* C library functions */
#define HAVE_MALLOC 1
#define HAVE_CALLOC 1
#define HAVE_REALLOC 1
#define HAVE_FREE 1
#define HAVE_ALLOCA 1
#define HAVE_QSORT 1
#define HAVE_ABS 1
#define HAVE_MEMSET 1
#define HAVE_MEMCPY 1
#define HAVE_MEMMOVE 1
#define HAVE_MEMCMP 1
#define HAVE_STRLEN 1
#define HAVE__STRREV 1
#define HAVE__STRUPR 1
#define HAVE__STRLWR 1
#define HAVE_STRCHR 1
#define HAVE_STRRCHR 1
#define HAVE_STRSTR 1
#define HAVE__LTOA 1
#define HAVE__ULTOA 1
#define HAVE_STRTOL 1
#define HAVE_STRTOUL 1
#define HAVE_STRTOD 1
#define HAVE_ATOI 1
#define HAVE_ATOF 1
#define HAVE_STRCMP 1
#define HAVE_STRNCMP 1
#define HAVE__STRICMP 1
#define HAVE__STRNICMP 1
#define HAVE_ATAN 1
#define HAVE_ATAN2 1
#define HAVE_ACOS 1
#define HAVE_ASIN 1
#define HAVE_CEIL 1
#define HAVE_COS 1
#define HAVE_COSF 1
#define HAVE_FABS 1
#define HAVE_FLOOR 1
#define HAVE_LOG 1
#define HAVE_POW 1
#define HAVE_SIN 1
#define HAVE_SINF 1
#define HAVE_SQRT 1
#define HAVE_SQRTF 1
#define HAVE_TAN 1
#define HAVE_TANF 1
#if _MSC_VER >= 1800
#define HAVE_STRTOLL 1
#define HAVE_VSSCANF 1
#define HAVE_COPYSIGN 1
#define HAVE_SCALBN 1
#endif
#if !defined(_MSC_VER) || defined(_USE_MATH_DEFINES)
#define HAVE_M_PI 1
#endif
#else
#define HAVE_STDARG_H 1
#define HAVE_STDDEF_H 1
#endif
/* Enable various audio drivers */
#define SDL_AUDIO_DRIVER_WASAPI 1
#define SDL_AUDIO_DRIVER_DSOUND 1
#define SDL_AUDIO_DRIVER_XAUDIO2 0
#define SDL_AUDIO_DRIVER_WINMM 1
#define SDL_AUDIO_DRIVER_DISK 1
#define SDL_AUDIO_DRIVER_DUMMY 1
/* Enable various input drivers */
#define SDL_JOYSTICK_DINPUT 1
#define SDL_JOYSTICK_XINPUT 1
#define SDL_HAPTIC_DINPUT 1
#define SDL_HAPTIC_XINPUT 1
/* Enable various shared object loading systems */
#define SDL_LOADSO_WINDOWS 1
/* Enable various threading systems */
#define SDL_THREAD_WINDOWS 1
/* Enable various timer systems */
#define SDL_TIMER_WINDOWS 1
/* Enable various video drivers */
#define SDL_VIDEO_DRIVER_DUMMY 1
#define SDL_VIDEO_DRIVER_WINDOWS 1
#ifndef SDL_VIDEO_RENDER_D3D
#define SDL_VIDEO_RENDER_D3D 1
#endif
#ifndef SDL_VIDEO_RENDER_D3D11
#define SDL_VIDEO_RENDER_D3D11 0
#endif
/* Enable OpenGL support */
#ifndef SDL_VIDEO_OPENGL
#define SDL_VIDEO_OPENGL 1
#endif
#ifndef SDL_VIDEO_OPENGL_WGL
#define SDL_VIDEO_OPENGL_WGL 1
#endif
#ifndef SDL_VIDEO_RENDER_OGL
#define SDL_VIDEO_RENDER_OGL 1
#endif
#ifndef SDL_VIDEO_RENDER_OGL_ES2
#define SDL_VIDEO_RENDER_OGL_ES2 1
#endif
#ifndef SDL_VIDEO_OPENGL_ES2
#define SDL_VIDEO_OPENGL_ES2 1
#endif
#ifndef SDL_VIDEO_OPENGL_EGL
#define SDL_VIDEO_OPENGL_EGL 1
#endif
/* Enable Vulkan support */
#define SDL_VIDEO_VULKAN 1
/* Enable system power support */
#define SDL_POWER_WINDOWS 1
/* Enable filesystem support */
#define SDL_FILESYSTEM_WINDOWS 1
/* Enable assembly routines (Win64 doesn't have inline asm) */
#ifndef _WIN64
#define SDL_ASSEMBLY_ROUTINES 1
#endif
#endif /* SDL_config_windows_h_ */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_audio.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/* !!! FIXME: several functions in here need Doxygen comments. */
/**
* \file SDL_audio.h
*
* Access to the raw audio mixing buffer for the SDL library.
*/
#ifndef SDL_audio_h_
#define SDL_audio_h_
#include "SDL_stdinc.h"
#include "SDL_error.h"
#include "SDL_endian.h"
#include "SDL_mutex.h"
#include "SDL_thread.h"
#include "SDL_rwops.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Audio format flags.
*
* These are what the 16 bits in SDL_AudioFormat currently mean...
* (Unspecified bits are always zero).
*
* \verbatim
++-----------------------sample is signed if set
||
|| ++-----------sample is bigendian if set
|| ||
|| || ++---sample is float if set
|| || ||
|| || || +---sample bit size---+
|| || || | |
15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00
\endverbatim
*
* There are macros in SDL 2.0 and later to query these bits.
*/
typedef Uint16 SDL_AudioFormat;
/**
* \name Audio flags
*/
/* @{ */
#define SDL_AUDIO_MASK_BITSIZE (0xFF)
#define SDL_AUDIO_MASK_DATATYPE (1<<8)
#define SDL_AUDIO_MASK_ENDIAN (1<<12)
#define SDL_AUDIO_MASK_SIGNED (1<<15)
#define SDL_AUDIO_BITSIZE(x) (x & SDL_AUDIO_MASK_BITSIZE)
#define SDL_AUDIO_ISFLOAT(x) (x & SDL_AUDIO_MASK_DATATYPE)
#define SDL_AUDIO_ISBIGENDIAN(x) (x & SDL_AUDIO_MASK_ENDIAN)
#define SDL_AUDIO_ISSIGNED(x) (x & SDL_AUDIO_MASK_SIGNED)
#define SDL_AUDIO_ISINT(x) (!SDL_AUDIO_ISFLOAT(x))
#define SDL_AUDIO_ISLITTLEENDIAN(x) (!SDL_AUDIO_ISBIGENDIAN(x))
#define SDL_AUDIO_ISUNSIGNED(x) (!SDL_AUDIO_ISSIGNED(x))
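/* For example, 0x8010 (AUDIO_S16LSB, defined below) decodes via these macros
   as: SDL_AUDIO_BITSIZE() == 16, SDL_AUDIO_ISSIGNED() nonzero,
   SDL_AUDIO_ISFLOAT() zero, and SDL_AUDIO_ISBIGENDIAN() zero (little-endian). */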
/**
* \name Audio format flags
*
* Defaults to LSB byte order.
*/
/* @{ */
#define AUDIO_U8 0x0008 /**< Unsigned 8-bit samples */
#define AUDIO_S8 0x8008 /**< Signed 8-bit samples */
#define AUDIO_U16LSB 0x0010 /**< Unsigned 16-bit samples */
#define AUDIO_S16LSB 0x8010 /**< Signed 16-bit samples */
#define AUDIO_U16MSB 0x1010 /**< As above, but big-endian byte order */
#define AUDIO_S16MSB 0x9010 /**< As above, but big-endian byte order */
#define AUDIO_U16 AUDIO_U16LSB
#define AUDIO_S16 AUDIO_S16LSB
/* @} */
/**
* \name int32 support
*/
/* @{ */
#define AUDIO_S32LSB 0x8020 /**< 32-bit integer samples */
#define AUDIO_S32MSB 0x9020 /**< As above, but big-endian byte order */
#define AUDIO_S32 AUDIO_S32LSB
/* @} */
/**
* \name float32 support
*/
/* @{ */
#define AUDIO_F32LSB 0x8120 /**< 32-bit floating point samples */
#define AUDIO_F32MSB 0x9120 /**< As above, but big-endian byte order */
#define AUDIO_F32 AUDIO_F32LSB
/* @} */
/**
* \name Native audio byte ordering
*/
/* @{ */
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
#define AUDIO_U16SYS AUDIO_U16LSB
#define AUDIO_S16SYS AUDIO_S16LSB
#define AUDIO_S32SYS AUDIO_S32LSB
#define AUDIO_F32SYS AUDIO_F32LSB
#else
#define AUDIO_U16SYS AUDIO_U16MSB
#define AUDIO_S16SYS AUDIO_S16MSB
#define AUDIO_S32SYS AUDIO_S32MSB
#define AUDIO_F32SYS AUDIO_F32MSB
#endif
/* @} */
/**
* \name Allow change flags
*
* Which audio format changes are allowed when opening a device.
*/
/* @{ */
#define SDL_AUDIO_ALLOW_FREQUENCY_CHANGE 0x00000001
#define SDL_AUDIO_ALLOW_FORMAT_CHANGE 0x00000002
#define SDL_AUDIO_ALLOW_CHANNELS_CHANGE 0x00000004
#define SDL_AUDIO_ALLOW_SAMPLES_CHANGE 0x00000008
#define SDL_AUDIO_ALLOW_ANY_CHANGE (SDL_AUDIO_ALLOW_FREQUENCY_CHANGE|SDL_AUDIO_ALLOW_FORMAT_CHANGE|SDL_AUDIO_ALLOW_CHANNELS_CHANGE|SDL_AUDIO_ALLOW_SAMPLES_CHANGE)
/* @} */
/* @} *//* Audio flags */
/**
* This function is called when the audio device needs more data.
*
* \param userdata An application-specific parameter saved in
* the SDL_AudioSpec structure
* \param stream A pointer to the audio data buffer.
* \param len The length of that buffer in bytes.
*
* Once the callback returns, the buffer will no longer be valid.
* Stereo samples are stored in a LRLRLR ordering.
*
* You can choose to avoid callbacks and use SDL_QueueAudio() instead, if
* you like. Just open your audio device with a NULL callback.
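 *
 * A minimal callback sketch (fills the buffer with silence; real code would
 * mix or copy audio into `stream`; MySilenceCallback is an illustrative name):
 *
 * ```c
 * static void SDLCALL MySilenceCallback(void *userdata, Uint8 *stream, int len)
 * {
 *     SDL_memset(stream, 0, len);
 * }
 * ```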
*/
typedef void (SDLCALL * SDL_AudioCallback) (void *userdata, Uint8 * stream,
int len);
/**
* The calculated values in this structure are calculated by SDL_OpenAudio().
*
* For multi-channel audio, the default SDL channel mapping is:
* 2: FL FR (stereo)
* 3: FL FR LFE (2.1 surround)
* 4: FL FR BL BR (quad)
* 5: FL FR LFE BL BR (4.1 surround)
* 6: FL FR FC LFE SL SR (5.1 surround - last two can also be BL BR)
* 7: FL FR FC LFE BC SL SR (6.1 surround)
* 8: FL FR FC LFE BL BR SL SR (7.1 surround)
*/
typedef struct SDL_AudioSpec
{
int freq; /**< DSP frequency -- samples per second */
SDL_AudioFormat format; /**< Audio data format */
Uint8 channels; /**< Number of channels: 1 mono, 2 stereo */
Uint8 silence; /**< Audio buffer silence value (calculated) */
Uint16 samples; /**< Audio buffer size in sample FRAMES (total samples divided by channel count) */
Uint16 padding; /**< Necessary for some compile environments */
Uint32 size; /**< Audio buffer size in bytes (calculated) */
SDL_AudioCallback callback; /**< Callback that feeds the audio device (NULL to use SDL_QueueAudio()). */
void *userdata; /**< Userdata passed to callback (ignored for NULL callbacks). */
} SDL_AudioSpec;
struct SDL_AudioCVT;
typedef void (SDLCALL * SDL_AudioFilter) (struct SDL_AudioCVT * cvt,
SDL_AudioFormat format);
/**
* \brief Upper limit of filters in SDL_AudioCVT
*
* The maximum number of SDL_AudioFilter functions in SDL_AudioCVT is
* currently limited to 9. The SDL_AudioCVT.filters array has 10 pointers,
* one of which is the terminating NULL pointer.
*/
#define SDL_AUDIOCVT_MAX_FILTERS 9
/**
* \struct SDL_AudioCVT
* \brief A structure to hold a set of audio conversion filters and buffers.
*
* Note that various parts of the conversion pipeline can take advantage
* of SIMD operations (like SSE2, for example). SDL_AudioCVT doesn't require
* you to pass it aligned data, but can possibly run much faster if you
* set both its (buf) field to a pointer that is aligned to 16 bytes, and its
* (len) field to something that's a multiple of 16, if possible.
*/
#if defined(__GNUC__) && !defined(__CHERI_PURE_CAPABILITY__)
/* This structure is 84 bytes on 32-bit architectures, make sure GCC doesn't
pad it out to 88 bytes to guarantee ABI compatibility between compilers.
This is not a concern on CHERI architectures, where pointers must be stored
at aligned locations otherwise they will become invalid, and thus structs
containing pointers cannot be packed without giving a warning or error.
vvv
The next time we rev the ABI, make sure to size the ints and add padding.
*/
#define SDL_AUDIOCVT_PACKED __attribute__((packed))
#else
#define SDL_AUDIOCVT_PACKED
#endif
/* */
typedef struct SDL_AudioCVT
{
int needed; /**< Set to 1 if conversion possible */
SDL_AudioFormat src_format; /**< Source audio format */
SDL_AudioFormat dst_format; /**< Target audio format */
double rate_incr; /**< Rate conversion increment */
Uint8 *buf; /**< Buffer to hold entire audio data */
int len; /**< Length of original audio buffer */
int len_cvt; /**< Length of converted audio buffer */
int len_mult; /**< buffer must be len*len_mult big */
double len_ratio; /**< Given len, final size is len*len_ratio */
SDL_AudioFilter filters[SDL_AUDIOCVT_MAX_FILTERS + 1]; /**< NULL-terminated list of filter functions */
int filter_index; /**< Current audio conversion function */
} SDL_AUDIOCVT_PACKED SDL_AudioCVT;
/* Function prototypes */
/**
* \name Driver discovery functions
*
* These functions return the list of built in audio drivers, in the
* order that they are normally initialized by default.
*/
/* @{ */
/**
* Use this function to get the number of built-in audio drivers.
*
* This function returns a hardcoded number. This never returns a negative
* value; if there are no drivers compiled into this build of SDL, this
* function returns zero. The presence of a driver in this list does not mean
* it will function, it just means SDL is capable of interacting with that
* interface. For example, a build of SDL might have esound support, but if
* there's no esound server available, SDL's esound driver would fail if used.
*
* By default, SDL tries all drivers, in its preferred order, until one is
* found to be usable.
*
* \returns the number of built-in audio drivers.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetAudioDriver
*/
extern DECLSPEC int SDLCALL SDL_GetNumAudioDrivers(void);
/**
* Use this function to get the name of a built in audio driver.
*
* The list of audio drivers is given in the order that they are normally
* initialized by default; the drivers that seem more reasonable to choose
* first (as far as the SDL developers believe) are earlier in the list.
*
* The names of drivers are all simple, low-ASCII identifiers, like "alsa",
* "coreaudio" or "xaudio2". These never have Unicode characters, and are not
* meant to be proper names.
*
* \param index the index of the audio driver; the value ranges from 0 to
* SDL_GetNumAudioDrivers() - 1
* \returns the name of the audio driver at the requested index, or NULL if an
* invalid index was specified.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetNumAudioDrivers
*/
extern DECLSPEC const char *SDLCALL SDL_GetAudioDriver(int index);
/* @} */
/**
* \name Initialization and cleanup
*
* \internal These functions are used internally, and should not be used unless
* you have a specific need to specify the audio driver you want to
* use. You should normally use SDL_Init() or SDL_InitSubSystem().
*/
/* @{ */
/**
* Use this function to initialize a particular audio driver.
*
* This function is used internally, and should not be used unless you have a
* specific need to designate the audio driver you want to use. You should
* normally use SDL_Init() or SDL_InitSubSystem().
*
* \param driver_name the name of the desired audio driver
* \returns 0 on success or a negative error code on failure; call
* SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_AudioQuit
*/
extern DECLSPEC int SDLCALL SDL_AudioInit(const char *driver_name);
/**
* Use this function to shut down audio if you initialized it with
* SDL_AudioInit().
*
* This function is used internally, and should not be used unless you have a
* specific need to specify the audio driver you want to use. You should
* normally use SDL_Quit() or SDL_QuitSubSystem().
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_AudioInit
*/
extern DECLSPEC void SDLCALL SDL_AudioQuit(void);
/* @} */
/**
* Get the name of the current audio driver.
*
* The returned string points to internal static memory and thus never becomes
* invalid, even if you quit the audio subsystem and initialize a new driver
* (although such a case would return a different static string from another
* call to this function, of course). As such, you should not modify or free
* the returned string.
*
* \returns the name of the current audio driver or NULL if no driver has been
* initialized.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_AudioInit
*/
extern DECLSPEC const char *SDLCALL SDL_GetCurrentAudioDriver(void);
/**
* This function is a legacy means of opening the audio device.
*
* This function remains for compatibility with SDL 1.2, but also because it's
* slightly easier to use than the new functions in SDL 2.0. The new, more
* powerful, and preferred way to do this is SDL_OpenAudioDevice().
*
* This function is roughly equivalent to:
*
* ```c
* SDL_OpenAudioDevice(NULL, 0, desired, obtained, SDL_AUDIO_ALLOW_ANY_CHANGE);
* ```
*
* With two notable exceptions:
*
* - If `obtained` is NULL, we use `desired` (and allow no changes), which
* means desired will be modified to have the correct values for silence,
* etc, and SDL will convert any differences between your app's specific
* request and the hardware behind the scenes.
* - The return value is always success or failure, and not a device ID, which
* means you can only have one device open at a time with this function.
*
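 * A minimal sketch (MyCallback is an illustrative callback; set `callback`
 * to NULL to use SDL_QueueAudio() instead):
 *
 * ```c
 * SDL_AudioSpec want, have;
 * SDL_zero(want);
 * want.freq = 48000;
 * want.format = AUDIO_F32;
 * want.channels = 2;
 * want.samples = 4096;
 * want.callback = MyCallback;
 * if (SDL_OpenAudio(&want, &have) < 0) {
 *     SDL_Log("couldn't open audio: %s", SDL_GetError());
 * }
 * ```
 *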
* \param desired an SDL_AudioSpec structure representing the desired output
* format. Please refer to the SDL_OpenAudioDevice
* documentation for details on how to prepare this structure.
* \param obtained an SDL_AudioSpec structure filled in with the actual
* parameters, or NULL.
* \returns 0 if successful, placing the actual hardware parameters in the
* structure pointed to by `obtained`.
*
* If `obtained` is NULL, the audio data passed to the callback
* function will be guaranteed to be in the requested format, and
* will be automatically converted to the actual hardware audio
* format if necessary. If `obtained` is NULL, `desired` will have
* fields modified.
*
* This function returns a negative error code on failure to open the
* audio device or failure to set up the audio thread; call
* SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_CloseAudio
* \sa SDL_LockAudio
* \sa SDL_PauseAudio
* \sa SDL_UnlockAudio
*/
extern DECLSPEC int SDLCALL SDL_OpenAudio(SDL_AudioSpec * desired,
SDL_AudioSpec * obtained);
/**
* SDL Audio Device IDs.
*
* A successful call to SDL_OpenAudio() is always device id 1, and legacy
* SDL audio APIs assume you want this device ID. SDL_OpenAudioDevice() calls
 * always return devices >= 2 on success. The legacy calls are good both
* for backwards compatibility and when you don't care about multiple,
* specific, or capture devices.
*/
typedef Uint32 SDL_AudioDeviceID;
/**
* Get the number of built-in audio devices.
*
* This function is only valid after successfully initializing the audio
* subsystem.
*
* Note that audio capture support is not implemented as of SDL 2.0.4, so the
* `iscapture` parameter is for future expansion and should always be zero for
* now.
*
* This function will return -1 if an explicit list of devices can't be
* determined. Returning -1 is not an error. For example, if SDL is set up to
* talk to a remote audio server, it can't list every one available on the
* Internet, but it will still allow a specific host to be specified in
* SDL_OpenAudioDevice().
*
* In many common cases, when this function returns a value <= 0, it can still
* successfully open the default device (NULL for first argument of
* SDL_OpenAudioDevice()).
*
* This function may trigger a complete redetect of available hardware. It
* should not be called for each iteration of a loop, but rather once at the
* start of a loop:
*
* ```c
* // Don't do this:
* for (int i = 0; i < SDL_GetNumAudioDevices(0); i++)
*
* // do this instead:
* const int count = SDL_GetNumAudioDevices(0);
* for (int i = 0; i < count; ++i) { do_something_here(); }
* ```
*
* \param iscapture zero to request playback devices, non-zero to request
* recording devices
* \returns the number of available devices exposed by the current driver or
* -1 if an explicit list of devices can't be determined. A return
* value of -1 does not necessarily mean an error condition.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetAudioDeviceName
* \sa SDL_OpenAudioDevice
*/
extern DECLSPEC int SDLCALL SDL_GetNumAudioDevices(int iscapture);
/**
* Get the human-readable name of a specific audio device.
*
* This function is only valid after successfully initializing the audio
* subsystem. The values returned by this function reflect the latest call to
* SDL_GetNumAudioDevices(); re-call that function to redetect available
* hardware.
*
* The string returned by this function is UTF-8 encoded, read-only, and
* managed internally. You are not to free it. If you need to keep the string
* for any length of time, you should make your own copy of it, as it will be
* invalid next time any of several other SDL functions are called.
*
* \param index the index of the audio device; valid values range from 0 to
* SDL_GetNumAudioDevices() - 1
* \param iscapture non-zero to query the list of recording devices, zero to
* query the list of output devices.
* \returns the name of the audio device at the requested index, or NULL on
* error.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetNumAudioDevices
* \sa SDL_GetDefaultAudioInfo
*/
extern DECLSPEC const char *SDLCALL SDL_GetAudioDeviceName(int index,
int iscapture);
/**
* Get the preferred audio format of a specific audio device.
*
 * This function is only valid after successfully initializing the audio
* subsystem. The values returned by this function reflect the latest call to
* SDL_GetNumAudioDevices(); re-call that function to redetect available
* hardware.
*
* `spec` will be filled with the sample rate, sample format, and channel
* count.
*
* \param index the index of the audio device; valid values range from 0 to
* SDL_GetNumAudioDevices() - 1
* \param iscapture non-zero to query the list of recording devices, zero to
* query the list of output devices.
* \param spec The SDL_AudioSpec to be initialized by this function.
* \returns 0 on success, nonzero on error
*
* \since This function is available since SDL 2.0.16.
*
* \sa SDL_GetNumAudioDevices
* \sa SDL_GetDefaultAudioInfo
*/
extern DECLSPEC int SDLCALL SDL_GetAudioDeviceSpec(int index,
int iscapture,
SDL_AudioSpec *spec);
/**
* Get the name and preferred format of the default audio device.
*
* Some (but not all!) platforms have an isolated mechanism to get information
* about the "default" device. This can actually be a completely different
* device that's not in the list you get from SDL_GetAudioDeviceSpec(). It can
* even be a network address! (This is discussed in SDL_OpenAudioDevice().)
*
* As a result, this call is not guaranteed to be performant, as it can query
* the sound server directly every time, unlike the other query functions. You
* should call this function sparingly!
*
* `spec` will be filled with the sample rate, sample format, and channel
 * count, if a default device exists on the system. If `name` is provided, it
 * will be filled with either a dynamically-allocated UTF-8 string or NULL.
*
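 * A minimal sketch:
 *
 * ```c
 * char *name = NULL;
 * SDL_AudioSpec spec;
 * if (SDL_GetDefaultAudioInfo(&name, &spec, 0) == 0) {
 *     SDL_Log("default output: %s at %d Hz", name ? name : "(unnamed)", spec.freq);
 *     SDL_free(name);  // name is dynamically allocated
 * }
 * ```
 *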
* \param name A pointer to be filled with the name of the default device (can
* be NULL). Please call SDL_free() when you are done with this
* pointer!
* \param spec The SDL_AudioSpec to be initialized by this function.
* \param iscapture non-zero to query the default recording device, zero to
* query the default output device.
* \returns 0 on success, nonzero on error
*
* \since This function is available since SDL 2.24.0.
*
* \sa SDL_GetAudioDeviceName
* \sa SDL_GetAudioDeviceSpec
* \sa SDL_OpenAudioDevice
*/
extern DECLSPEC int SDLCALL SDL_GetDefaultAudioInfo(char **name,
SDL_AudioSpec *spec,
int iscapture);
/**
* Open a specific audio device.
*
* SDL_OpenAudio(), unlike this function, always acts on device ID 1. As such,
* this function will never return a 1 so as not to conflict with the legacy
* function.
*
* Please note that SDL 2.0 before 2.0.5 did not support recording; as such,
* this function would fail if `iscapture` was not zero. Starting with SDL
* 2.0.5, recording is implemented and this value can be non-zero.
*
* Passing in a `device` name of NULL requests the most reasonable default
* (and is equivalent to what SDL_OpenAudio() does to choose a device). The
* `device` name is a UTF-8 string reported by SDL_GetAudioDeviceName(), but
* some drivers allow arbitrary and driver-specific strings, such as a
* hostname/IP address for a remote audio server, or a filename in the
* diskaudio driver.
*
* An opened audio device starts out paused, and should be enabled for playing
* by calling SDL_PauseAudioDevice(devid, 0) when you are ready for your audio
* callback function to be called. Since the audio driver may modify the
* requested size of the audio buffer, you should allocate any local mixing
* buffers after you open the audio device.
*
* The audio callback runs in a separate thread in most cases; you can prevent
* race conditions between your callback and other threads without fully
* pausing playback with SDL_LockAudioDevice(). For more information about the
* callback, see SDL_AudioSpec.
*
* Managing the audio spec via 'desired' and 'obtained':
*
* When filling in the desired audio spec structure:
*
* - `desired->freq` should be the frequency in sample-frames-per-second (Hz).
* - `desired->format` should be the audio format (`AUDIO_S16SYS`, etc).
* - `desired->samples` is the desired size of the audio buffer, in _sample
* frames_ (with stereo output, two samples--left and right--would make a
* single sample frame). This number should be a power of two, and may be
* adjusted by the audio driver to a value more suitable for the hardware.
 *   Good values seem to range between 512 and 8192 inclusive, depending on
* the application and CPU speed. Smaller values reduce latency, but can
* lead to underflow if the application is doing heavy processing and cannot
* fill the audio buffer in time. Note that the number of sample frames is
* directly related to time by the following formula: `ms =
* (sampleframes*1000)/freq`
* - `desired->size` is the size in _bytes_ of the audio buffer, and is
* calculated by SDL_OpenAudioDevice(). You don't initialize this.
* - `desired->silence` is the value used to set the buffer to silence, and is
* calculated by SDL_OpenAudioDevice(). You don't initialize this.
* - `desired->callback` should be set to a function that will be called when
* the audio device is ready for more data. It is passed a pointer to the
* audio buffer, and the length in bytes of the audio buffer. This function
* usually runs in a separate thread, and so you should protect data
* structures that it accesses by calling SDL_LockAudioDevice() and
* SDL_UnlockAudioDevice() in your code. Alternately, you may pass a NULL
* pointer here, and call SDL_QueueAudio() with some frequency, to queue
* more audio samples to be played (or for capture devices, call
* SDL_DequeueAudio() with some frequency, to obtain audio samples).
* - `desired->userdata` is passed as the first parameter to your callback
* function. If you passed a NULL callback, this value is ignored.
*
* `allowed_changes` can have the following flags OR'd together:
*
* - `SDL_AUDIO_ALLOW_FREQUENCY_CHANGE`
* - `SDL_AUDIO_ALLOW_FORMAT_CHANGE`
* - `SDL_AUDIO_ALLOW_CHANNELS_CHANGE`
* - `SDL_AUDIO_ALLOW_SAMPLES_CHANGE`
* - `SDL_AUDIO_ALLOW_ANY_CHANGE`
*
* These flags specify how SDL should behave when a device cannot offer a
* specific feature. If the application requests a feature that the hardware
* doesn't offer, SDL will always try to get the closest equivalent.
*
* For example, if you ask for float32 audio format, but the sound card only
* supports int16, SDL will set the hardware to int16. If you had set
* SDL_AUDIO_ALLOW_FORMAT_CHANGE, SDL will change the format in the `obtained`
* structure. If that flag was *not* set, SDL will prepare to convert your
* callback's float32 audio to int16 before feeding it to the hardware and
* will keep the originally requested format in the `obtained` structure.
*
* The resulting audio specs, varying depending on hardware and on what
* changes were allowed, will then be written back to `obtained`.
*
* If your application can only handle one specific data format, pass a zero
* for `allowed_changes` and let SDL transparently handle any differences.
*
* \param device a UTF-8 string reported by SDL_GetAudioDeviceName() or a
* driver-specific name as appropriate. NULL requests the most
* reasonable default device.
* \param iscapture non-zero to specify a device should be opened for
* recording, not playback
* \param desired an SDL_AudioSpec structure representing the desired output
* format; see SDL_OpenAudio() for more information
* \param obtained an SDL_AudioSpec structure filled in with the actual output
* format; see SDL_OpenAudio() for more information
* \param allowed_changes 0, or one or more flags OR'd together
* \returns a valid device ID that is > 0 on success or 0 on failure; call
* SDL_GetError() for more information.
*
* For compatibility with SDL 1.2, this will never return 1, since
* SDL reserves that ID for the legacy SDL_OpenAudio() function.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_CloseAudioDevice
* \sa SDL_GetAudioDeviceName
* \sa SDL_LockAudioDevice
* \sa SDL_OpenAudio
* \sa SDL_PauseAudioDevice
* \sa SDL_UnlockAudioDevice
*/
extern DECLSPEC SDL_AudioDeviceID SDLCALL SDL_OpenAudioDevice(
const char *device,
int iscapture,
const SDL_AudioSpec *desired,
SDL_AudioSpec *obtained,
int allowed_changes);
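/*
 * Editor's example (a hedged sketch, not part of the original header):
 * opening the default output device in push mode (NULL callback) and
 * starting playback. The chosen frequency/format values are arbitrary.
 *
 * ```c
 * #include "SDL.h"
 *
 * SDL_AudioDeviceID open_default_output(SDL_AudioSpec *obtained)
 * {
 *     SDL_AudioSpec desired;
 *     SDL_zero(desired);
 *     desired.freq = 48000;
 *     desired.format = AUDIO_S16SYS;
 *     desired.channels = 2;
 *     desired.samples = 4096;    // power of two, per the notes above
 *     desired.callback = NULL;   // push method: feed with SDL_QueueAudio()
 *
 *     SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &desired, obtained,
 *                                         SDL_AUDIO_ALLOW_FREQUENCY_CHANGE);
 *     if (dev == 0) {
 *         SDL_Log("SDL_OpenAudioDevice failed: %s", SDL_GetError());
 *     } else {
 *         SDL_PauseAudioDevice(dev, 0);  // devices start paused; unpause
 *     }
 *     return dev;
 * }
 * ```
 */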
/**
* \name Audio state
*
* Get the current audio state.
*/
/* @{ */
typedef enum
{
SDL_AUDIO_STOPPED = 0,
SDL_AUDIO_PLAYING,
SDL_AUDIO_PAUSED
} SDL_AudioStatus;
/**
* This function is a legacy means of querying the audio device.
*
* New programs might want to use SDL_GetAudioDeviceStatus() instead. This
* function is equivalent to calling...
*
* ```c
* SDL_GetAudioDeviceStatus(1);
* ```
*
* ...and is only useful if you used the legacy SDL_OpenAudio() function.
*
* \returns the SDL_AudioStatus of the audio device opened by SDL_OpenAudio().
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetAudioDeviceStatus
*/
extern DECLSPEC SDL_AudioStatus SDLCALL SDL_GetAudioStatus(void);
/**
* Use this function to get the current audio state of an audio device.
*
* \param dev the ID of an audio device previously opened with
* SDL_OpenAudioDevice()
* \returns the SDL_AudioStatus of the specified audio device.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_PauseAudioDevice
*/
extern DECLSPEC SDL_AudioStatus SDLCALL SDL_GetAudioDeviceStatus(SDL_AudioDeviceID dev);
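/*
 * Editor's example (a minimal sketch): mapping a device's status to a
 * printable string.
 *
 * ```c
 * const char *status_name(SDL_AudioDeviceID dev)
 * {
 *     switch (SDL_GetAudioDeviceStatus(dev)) {
 *     case SDL_AUDIO_PLAYING: return "playing";
 *     case SDL_AUDIO_PAUSED:  return "paused";
 *     default:                return "stopped";
 *     }
 * }
 * ```
 */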
/* @} *//* Audio State */
/**
* \name Pause audio functions
*
* These functions pause and unpause the audio callback processing.
* They should be called with a parameter of 0 after opening the audio
* device to start playing sound. This is so you can safely initialize
* data for your callback function after opening the audio device.
* Silence will be written to the audio device during the pause.
*/
/* @{ */
/**
* This function is a legacy means of pausing the audio device.
*
* New programs might want to use SDL_PauseAudioDevice() instead. This
* function is equivalent to calling...
*
* ```c
* SDL_PauseAudioDevice(1, pause_on);
* ```
*
* ...and is only useful if you used the legacy SDL_OpenAudio() function.
*
* \param pause_on non-zero to pause, 0 to unpause
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetAudioStatus
* \sa SDL_PauseAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_PauseAudio(int pause_on);
/**
* Use this function to pause and unpause audio playback on a specified
* device.
*
* This function pauses and unpauses the audio callback processing for a given
* device. Newly-opened audio devices start in the paused state, so you must
* call this function with **pause_on**=0 after opening the specified audio
* device to start playing sound. This allows you to safely initialize data
* for your callback function after opening the audio device. Silence will be
* written to the audio device while paused, and the audio callback is
* guaranteed to not be called. Pausing one device does not prevent other
* unpaused devices from running their callbacks.
*
* Pausing state does not stack; even if you pause a device several times, a
* single unpause will start the device playing again, and vice versa. This is
* different from how SDL_LockAudioDevice() works.
*
* If you just need to protect a few variables from race conditions vs your
* callback, you shouldn't pause the audio device, as it will lead to dropouts
* in the audio playback. Instead, you should use SDL_LockAudioDevice().
*
* \param dev a device opened by SDL_OpenAudioDevice()
* \param pause_on non-zero to pause, 0 to unpause
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_LockAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_PauseAudioDevice(SDL_AudioDeviceID dev,
int pause_on);
/* @} *//* Pause audio functions */
/**
* Load the audio data of a WAVE file into memory.
*
* Loading a WAVE file requires `src`, `spec`, `audio_buf` and `audio_len` to
* be valid pointers. The entire data portion of the file is then loaded into
* memory and decoded if necessary.
*
* If `freesrc` is non-zero, the data source gets automatically closed and
* freed before the function returns.
*
* Supported formats are RIFF WAVE files with the formats PCM (8, 16, 24, and
* 32 bits), IEEE Float (32 bits), Microsoft ADPCM and IMA ADPCM (4 bits), and
* A-law and mu-law (8 bits). Other formats are currently unsupported and
* cause an error.
*
* If this function succeeds, the pointer returned by it is equal to `spec`
* and the pointer to the audio data allocated by the function is written to
* `audio_buf` and its length in bytes to `audio_len`. The SDL_AudioSpec
* members `freq`, `channels`, and `format` are set to the values of the audio
* data in the buffer. The `samples` member is set to a sane default and all
* others are set to zero.
*
* It's necessary to use SDL_FreeWAV() to free the audio data returned in
* `audio_buf` when it is no longer used.
*
* Because of the underspecification of the .WAV format, there are many
* problematic files in the wild that cause issues with strict decoders. To
 * provide compatibility with these files, this decoder is lenient in regard
* to the truncation of the file, the fact chunk, and the size of the RIFF
* chunk. The hints `SDL_HINT_WAVE_RIFF_CHUNK_SIZE`,
* `SDL_HINT_WAVE_TRUNCATION`, and `SDL_HINT_WAVE_FACT_CHUNK` can be used to
* tune the behavior of the loading process.
*
* Any file that is invalid (due to truncation, corruption, or wrong values in
* the headers), too big, or unsupported causes an error. Additionally, any
* critical I/O error from the data source will terminate the loading process
* with an error. The function returns NULL on error and in all cases (with
* the exception of `src` being NULL), an appropriate error message will be
* set.
*
* It is required that the data source supports seeking.
*
* Example:
*
* ```c
* SDL_LoadWAV_RW(SDL_RWFromFile("sample.wav", "rb"), 1, &spec, &buf, &len);
* ```
*
* Note that the SDL_LoadWAV macro does this same thing for you, but in a less
* messy way:
*
* ```c
* SDL_LoadWAV("sample.wav", &spec, &buf, &len);
* ```
*
* \param src The data source for the WAVE data
* \param freesrc If non-zero, SDL will _always_ free the data source
* \param spec An SDL_AudioSpec that will be filled in with the wave file's
* format details
* \param audio_buf A pointer filled with the audio data, allocated by the
* function.
* \param audio_len A pointer filled with the length of the audio data buffer
* in bytes
* \returns This function, if successfully called, returns `spec`, which will
* be filled with the audio data format of the wave source data.
* `audio_buf` will be filled with a pointer to an allocated buffer
* containing the audio data, and `audio_len` is filled with the
* length of that audio buffer in bytes.
*
* This function returns NULL if the .WAV file cannot be opened, uses
* an unknown data format, or is corrupt; call SDL_GetError() for
* more information.
*
* When the application is done with the data returned in
* `audio_buf`, it should call SDL_FreeWAV() to dispose of it.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_FreeWAV
* \sa SDL_LoadWAV
*/
extern DECLSPEC SDL_AudioSpec *SDLCALL SDL_LoadWAV_RW(SDL_RWops * src,
int freesrc,
SDL_AudioSpec * spec,
Uint8 ** audio_buf,
Uint32 * audio_len);
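/*
 * Editor's example (a hedged sketch): loading a WAV, playing it on a device
 * opened with the file's own format via the queue API, then cleaning up.
 *
 * ```c
 * #include "SDL.h"
 *
 * int play_wav(const char *path)
 * {
 *     SDL_AudioSpec spec;
 *     Uint8 *buf = NULL;
 *     Uint32 len = 0;
 *     if (SDL_LoadWAV(path, &spec, &buf, &len) == NULL) {
 *         return -1;  // SDL_GetError() has the details
 *     }
 *     spec.callback = NULL;  // push method
 *     SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &spec, NULL, 0);
 *     if (dev == 0) {
 *         SDL_FreeWAV(buf);
 *         return -1;
 *     }
 *     SDL_QueueAudio(dev, buf, len);
 *     SDL_PauseAudioDevice(dev, 0);          // start playing
 *     while (SDL_GetQueuedAudioSize(dev) > 0) {
 *         SDL_Delay(100);                    // crude: wait for queue to drain
 *     }
 *     SDL_CloseAudioDevice(dev);
 *     SDL_FreeWAV(buf);
 *     return 0;
 * }
 * ```
 */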
/**
* Loads a WAV from a file.
* Compatibility convenience function.
*/
#define SDL_LoadWAV(file, spec, audio_buf, audio_len) \
    SDL_LoadWAV_RW(SDL_RWFromFile(file, "rb"), 1, spec, audio_buf, audio_len)
/**
* Free data previously allocated with SDL_LoadWAV() or SDL_LoadWAV_RW().
*
* After a WAVE file has been opened with SDL_LoadWAV() or SDL_LoadWAV_RW()
* its data can eventually be freed with SDL_FreeWAV(). It is safe to call
* this function with a NULL pointer.
*
* \param audio_buf a pointer to the buffer created by SDL_LoadWAV() or
* SDL_LoadWAV_RW()
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_LoadWAV
* \sa SDL_LoadWAV_RW
*/
extern DECLSPEC void SDLCALL SDL_FreeWAV(Uint8 * audio_buf);
/**
* Initialize an SDL_AudioCVT structure for conversion.
*
* Before an SDL_AudioCVT structure can be used to convert audio data it must
* be initialized with source and destination information.
*
* This function will zero out every field of the SDL_AudioCVT, so it must be
* called before the application fills in the final buffer information.
*
* Once this function has returned successfully, and reported that a
* conversion is necessary, the application fills in the rest of the fields in
* SDL_AudioCVT, now that it knows how large a buffer it needs to allocate,
* and then can call SDL_ConvertAudio() to complete the conversion.
*
* \param cvt an SDL_AudioCVT structure filled in with audio conversion
* information
* \param src_format the source format of the audio data; for more info see
* SDL_AudioFormat
* \param src_channels the number of channels in the source
* \param src_rate the frequency (sample-frames-per-second) of the source
* \param dst_format the destination format of the audio data; for more info
* see SDL_AudioFormat
* \param dst_channels the number of channels in the destination
* \param dst_rate the frequency (sample-frames-per-second) of the destination
* \returns 1 if the audio filter is prepared, 0 if no conversion is needed,
* or a negative error code on failure; call SDL_GetError() for more
* information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_ConvertAudio
*/
extern DECLSPEC int SDLCALL SDL_BuildAudioCVT(SDL_AudioCVT * cvt,
SDL_AudioFormat src_format,
Uint8 src_channels,
int src_rate,
SDL_AudioFormat dst_format,
Uint8 dst_channels,
int dst_rate);
/**
* Convert audio data to a desired audio format.
*
* This function does the actual audio data conversion, after the application
* has called SDL_BuildAudioCVT() to prepare the conversion information and
* then filled in the buffer details.
*
* Once the application has initialized the `cvt` structure using
* SDL_BuildAudioCVT(), allocated an audio buffer and filled it with audio
* data in the source format, this function will convert the buffer, in-place,
* to the desired format.
*
* The data conversion may go through several passes; any given pass may
* possibly temporarily increase the size of the data. For example, SDL might
* expand 16-bit data to 32 bits before resampling to a lower frequency,
* shrinking the data size after having grown it briefly. Since the supplied
* buffer will be both the source and destination, converting as necessary
* in-place, the application must allocate a buffer that will fully contain
* the data during its largest conversion pass. After SDL_BuildAudioCVT()
* returns, the application should set the `cvt->len` field to the size, in
* bytes, of the source data, and allocate a buffer that is `cvt->len *
* cvt->len_mult` bytes long for the `buf` field.
*
* The source data should be copied into this buffer before the call to
* SDL_ConvertAudio(). Upon successful return, this buffer will contain the
* converted audio, and `cvt->len_cvt` will be the size of the converted data,
* in bytes. Any bytes in the buffer past `cvt->len_cvt` are undefined once
* this function returns.
*
* \param cvt an SDL_AudioCVT structure that was previously set up by
* SDL_BuildAudioCVT().
* \returns 0 if the conversion was completed successfully or a negative error
* code on failure; call SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_BuildAudioCVT
*/
extern DECLSPEC int SDLCALL SDL_ConvertAudio(SDL_AudioCVT * cvt);
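/*
 * Editor's example (a hedged sketch following the buffer rules above):
 * converting mono 22050 Hz S16 data to stereo 48000 Hz F32, in place. The
 * function name and chosen formats are arbitrary.
 *
 * ```c
 * #include "SDL.h"
 *
 * // src/srclen hold existing AUDIO_S16SYS mono 22050 Hz sample data.
 * Uint8 *convert_to_f32_stereo_48k(const Uint8 *src, Uint32 srclen, int *out_len)
 * {
 *     SDL_AudioCVT cvt;
 *     int rc = SDL_BuildAudioCVT(&cvt, AUDIO_S16SYS, 1, 22050,
 *                                AUDIO_F32SYS, 2, 48000);
 *     if (rc < 0) return NULL;             // error; SDL_GetError() has details
 *     if (rc == 0) {                       // formats already match: plain copy
 *         Uint8 *copy = (Uint8 *) SDL_malloc(srclen);
 *         if (copy) { SDL_memcpy(copy, src, srclen); *out_len = (int) srclen; }
 *         return copy;
 *     }
 *     cvt.len = (int) srclen;              // size of the source data, in bytes
 *     cvt.buf = (Uint8 *) SDL_malloc((size_t) cvt.len * cvt.len_mult);
 *     if (!cvt.buf) return NULL;
 *     SDL_memcpy(cvt.buf, src, srclen);    // conversion happens in place
 *     if (SDL_ConvertAudio(&cvt) != 0) { SDL_free(cvt.buf); return NULL; }
 *     *out_len = cvt.len_cvt;              // size of the converted data
 *     return cvt.buf;
 * }
 * ```
 */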
/* SDL_AudioStream is a newer audio conversion interface.
   Its benefits over SDL_AudioCVT:
    - it can handle resampling data in chunks without generating
      artifacts, even when it doesn't have the complete buffer available.
    - it can handle incoming data in any variable size.
    - you push data as you have it, and pull it when you need it.
 */
/* this is opaque to the outside world. */
struct _SDL_AudioStream;
typedef struct _SDL_AudioStream SDL_AudioStream;
/**
* Create a new audio stream.
*
* \param src_format The format of the source audio
* \param src_channels The number of channels of the source audio
* \param src_rate The sampling rate of the source audio
* \param dst_format The format of the desired audio output
* \param dst_channels The number of channels of the desired audio output
* \param dst_rate The sampling rate of the desired audio output
 * \returns a new audio stream on success, or NULL on failure; call
 *          SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamFlush
* \sa SDL_AudioStreamClear
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC SDL_AudioStream * SDLCALL SDL_NewAudioStream(const SDL_AudioFormat src_format,
const Uint8 src_channels,
const int src_rate,
const SDL_AudioFormat dst_format,
const Uint8 dst_channels,
const int dst_rate);
/**
* Add data to be converted/resampled to the stream.
*
* \param stream The stream the audio data is being added to
* \param buf A pointer to the audio data to add
* \param len The number of bytes to write to the stream
* \returns 0 on success, or -1 on error.
*
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamFlush
* \sa SDL_AudioStreamClear
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC int SDLCALL SDL_AudioStreamPut(SDL_AudioStream *stream, const void *buf, int len);
/**
 * Get converted/resampled data from the stream.
*
* \param stream The stream the audio is being requested from
* \param buf A buffer to fill with audio data
* \param len The maximum number of bytes to fill
 * \returns the number of bytes read from the stream, or -1 on error.
*
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamFlush
* \sa SDL_AudioStreamClear
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC int SDLCALL SDL_AudioStreamGet(SDL_AudioStream *stream, void *buf, int len);
/**
* Get the number of converted/resampled bytes available.
*
* The stream may be buffering data behind the scenes until it has enough to
* resample correctly, so this number might be lower than what you expect, or
* even be zero. Add more data or flush the stream if you need the data now.
 *
 * \param stream the audio stream to query
 * \returns the number of converted/resampled bytes available to read.
 *
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamFlush
* \sa SDL_AudioStreamClear
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC int SDLCALL SDL_AudioStreamAvailable(SDL_AudioStream *stream);
/**
* Tell the stream that you're done sending data, and anything being buffered
* should be converted/resampled and made available immediately.
*
* It is legal to add more data to a stream after flushing, but there will be
* audio gaps in the output. Generally this is intended to signal the end of
* input, so the complete output becomes available.
 *
 * \param stream the audio stream to flush
 * \returns 0 on success, or a negative error code on failure; call
 *          SDL_GetError() for more information.
 *
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamClear
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC int SDLCALL SDL_AudioStreamFlush(SDL_AudioStream *stream);
/**
 * Clear any pending data in the stream without converting it.
*
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamFlush
* \sa SDL_FreeAudioStream
*/
extern DECLSPEC void SDLCALL SDL_AudioStreamClear(SDL_AudioStream *stream);
/**
 * Free an audio stream.
*
* \since This function is available since SDL 2.0.7.
*
* \sa SDL_NewAudioStream
* \sa SDL_AudioStreamPut
* \sa SDL_AudioStreamGet
* \sa SDL_AudioStreamAvailable
* \sa SDL_AudioStreamFlush
* \sa SDL_AudioStreamClear
*/
extern DECLSPEC void SDLCALL SDL_FreeAudioStream(SDL_AudioStream *stream);
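/*
 * Editor's example (a hedged sketch): a complete stream lifecycle, feeding
 * one chunk in and draining the converted data out. The source/destination
 * formats are arbitrary.
 *
 * ```c
 * #include "SDL.h"
 *
 * int stream_demo(const Uint8 *chunk, int chunklen)
 * {
 *     SDL_AudioStream *st = SDL_NewAudioStream(AUDIO_S16SYS, 1, 22050,
 *                                              AUDIO_F32SYS, 2, 48000);
 *     if (st == NULL) return -1;
 *     if (SDL_AudioStreamPut(st, chunk, chunklen) != 0) {
 *         SDL_FreeAudioStream(st);
 *         return -1;
 *     }
 *     SDL_AudioStreamFlush(st);  // end of input: make everything available
 *     while (SDL_AudioStreamAvailable(st) > 0) {
 *         Uint8 out[4096];
 *         int got = SDL_AudioStreamGet(st, out, (int) sizeof(out));
 *         if (got <= 0) break;
 *         // ... hand `got` bytes of `out` to the device, e.g. SDL_QueueAudio()
 *     }
 *     SDL_FreeAudioStream(st);
 *     return 0;
 * }
 * ```
 */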
#define SDL_MIX_MAXVOLUME 128
/**
* This function is a legacy means of mixing audio.
*
* This function is equivalent to calling...
*
* ```c
* SDL_MixAudioFormat(dst, src, format, len, volume);
* ```
*
* ...where `format` is the obtained format of the audio device from the
* legacy SDL_OpenAudio() function.
*
* \param dst the destination for the mixed audio
* \param src the source audio buffer to be mixed
* \param len the length of the audio buffer in bytes
* \param volume ranges from 0 - 128, and should be set to SDL_MIX_MAXVOLUME
* for full audio volume
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_MixAudioFormat
*/
extern DECLSPEC void SDLCALL SDL_MixAudio(Uint8 * dst, const Uint8 * src,
Uint32 len, int volume);
/**
* Mix audio data in a specified format.
*
* This takes an audio buffer `src` of `len` bytes of `format` data and mixes
* it into `dst`, performing addition, volume adjustment, and overflow
* clipping. The buffer pointed to by `dst` must also be `len` bytes of
* `format` data.
*
* This is provided for convenience -- you can mix your own audio data.
*
* Do not use this function for mixing together more than two streams of
* sample data. The output from repeated application of this function may be
* distorted by clipping, because there is no accumulator with greater range
* than the input (not to mention this being an inefficient way of doing it).
*
* It is a common misconception that this function is required to write audio
* data to an output stream in an audio callback. While you can do that,
* SDL_MixAudioFormat() is really only needed when you're mixing a single
* audio stream with a volume adjustment.
*
* \param dst the destination for the mixed audio
* \param src the source audio buffer to be mixed
* \param format the SDL_AudioFormat structure representing the desired audio
* format
* \param len the length of the audio buffer in bytes
* \param volume ranges from 0 - 128, and should be set to SDL_MIX_MAXVOLUME
* for full audio volume
*
* \since This function is available since SDL 2.0.0.
*/
extern DECLSPEC void SDLCALL SDL_MixAudioFormat(Uint8 * dst,
const Uint8 * src,
SDL_AudioFormat format,
Uint32 len, int volume);
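/*
 * Editor's example (a hedged sketch): an audio callback that silences the
 * output buffer and mixes a single AUDIO_S16SYS source into it at half
 * volume. The `Sound` struct is a placeholder for application state.
 *
 * ```c
 * #include "SDL.h"
 *
 * typedef struct { const Uint8 *data; Uint32 len, pos; } Sound;
 *
 * static void mix_callback(void *userdata, Uint8 *out, int outlen)
 * {
 *     Sound *s = (Sound *) userdata;
 *     SDL_memset(out, 0, (size_t) outlen);  // start from silence
 *     Uint32 remain = s->len - s->pos;
 *     Uint32 n = ((Uint32) outlen < remain) ? (Uint32) outlen : remain;
 *     if (n > 0) {
 *         SDL_MixAudioFormat(out, s->data + s->pos, AUDIO_S16SYS, n,
 *                            SDL_MIX_MAXVOLUME / 2);
 *         s->pos += n;
 *     }
 * }
 * ```
 */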
/**
* Queue more audio on non-callback devices.
*
* If you are looking to retrieve queued audio from a non-callback capture
* device, you want SDL_DequeueAudio() instead. SDL_QueueAudio() will return
* -1 to signify an error if you use it with capture devices.
*
* SDL offers two ways to feed audio to the device: you can either supply a
* callback that SDL triggers with some frequency to obtain more audio (pull
* method), or you can supply no callback, and then SDL will expect you to
* supply data at regular intervals (push method) with this function.
*
* There are no limits on the amount of data you can queue, short of
* exhaustion of address space. Queued data will drain to the device as
* necessary without further intervention from you. If the device needs audio
* but there is not enough queued, it will play silence to make up the
* difference. This means you will have skips in your audio playback if you
* aren't routinely queueing sufficient data.
*
* This function copies the supplied data, so you are safe to free it when the
* function returns. This function is thread-safe, but queueing to the same
* device from two threads at once does not promise which buffer will be
* queued first.
*
* You may not queue audio on a device that is using an application-supplied
* callback; doing so returns an error. You have to use the audio callback or
* queue audio with this function, but not both.
*
* You should not call SDL_LockAudio() on the device before queueing; SDL
* handles locking internally for this function.
*
* Note that SDL2 does not support planar audio. You will need to resample
* from planar audio formats into a non-planar one (see SDL_AudioFormat)
* before queuing audio.
*
* \param dev the device ID to which we will queue audio
* \param data the data to queue to the device for later playback
* \param len the number of bytes (not samples!) to which `data` points
* \returns 0 on success or a negative error code on failure; call
* SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.4.
*
* \sa SDL_ClearQueuedAudio
* \sa SDL_GetQueuedAudioSize
*/
extern DECLSPEC int SDLCALL SDL_QueueAudio(SDL_AudioDeviceID dev, const void *data, Uint32 len);
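/*
 * Editor's example (a minimal sketch): topping up a push-mode device so it
 * always has roughly half a second of S16 stereo 48 kHz audio queued. The
 * buffer here is silence; a real application would fill it with samples.
 *
 * ```c
 * #include "SDL.h"
 *
 * void keep_fed(SDL_AudioDeviceID dev)
 * {
 *     // ~0.5 s at 48000 Hz, 2 channels, 2 bytes per sample (zeroed = silence)
 *     static Uint8 buffer[48000 * 2 * 2 / 2];
 *     if (SDL_GetQueuedAudioSize(dev) < sizeof(buffer)) {
 *         if (SDL_QueueAudio(dev, buffer, sizeof(buffer)) != 0) {
 *             SDL_Log("SDL_QueueAudio failed: %s", SDL_GetError());
 *         }
 *     }
 * }
 * ```
 */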
/**
* Dequeue more audio on non-callback devices.
*
* If you are looking to queue audio for output on a non-callback playback
* device, you want SDL_QueueAudio() instead. SDL_DequeueAudio() will always
* return 0 if you use it with playback devices.
*
* SDL offers two ways to retrieve audio from a capture device: you can either
* supply a callback that SDL triggers with some frequency as the device
* records more audio data, (push method), or you can supply no callback, and
* then SDL will expect you to retrieve data at regular intervals (pull
* method) with this function.
*
* There are no limits on the amount of data you can queue, short of
* exhaustion of address space. Data from the device will keep queuing as
* necessary without further intervention from you. This means you will
* eventually run out of memory if you aren't routinely dequeueing data.
*
* Capture devices will not queue data when paused; if you are expecting to
* not need captured audio for some length of time, use SDL_PauseAudioDevice()
* to stop the capture device from queueing more data. This can be useful
* during, say, level loading times. When unpaused, capture devices will start
* queueing data from that point, having flushed any capturable data available
* while paused.
*
* This function is thread-safe, but dequeueing from the same device from two
* threads at once does not promise which thread will dequeue data first.
*
* You may not dequeue audio from a device that is using an
* application-supplied callback; doing so returns an error. You have to use
* the audio callback, or dequeue audio with this function, but not both.
*
* You should not call SDL_LockAudio() on the device before dequeueing; SDL
* handles locking internally for this function.
*
* \param dev the device ID from which we will dequeue audio
* \param data a pointer into where audio data should be copied
* \param len the number of bytes (not samples!) to which (data) points
* \returns the number of bytes dequeued, which could be less than requested;
* call SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.5.
*
* \sa SDL_ClearQueuedAudio
* \sa SDL_GetQueuedAudioSize
*/
extern DECLSPEC Uint32 SDLCALL SDL_DequeueAudio(SDL_AudioDeviceID dev, void *data, Uint32 len);
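/*
 * Editor's example (a minimal sketch): pulling whatever a capture device has
 * recorded so far into a caller-supplied buffer.
 *
 * ```c
 * #include "SDL.h"
 *
 * Uint32 drain_capture(SDL_AudioDeviceID capture_dev, Uint8 *dst, Uint32 cap)
 * {
 *     Uint32 total = 0;
 *     while (total < cap) {
 *         Uint32 got = SDL_DequeueAudio(capture_dev, dst + total, cap - total);
 *         if (got == 0) break;  // nothing more captured yet
 *         total += got;
 *     }
 *     return total;  // bytes actually copied
 * }
 * ```
 */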
/**
* Get the number of bytes of still-queued audio.
*
* For playback devices: this is the number of bytes that have been queued for
* playback with SDL_QueueAudio(), but have not yet been sent to the hardware.
*
 * Once we've sent it to the hardware, this function cannot determine the exact
* byte boundary of what has been played. It's possible that we just gave the
* hardware several kilobytes right before you called this function, but it
* hasn't played any of it yet, or maybe half of it, etc.
*
* For capture devices, this is the number of bytes that have been captured by
* the device and are waiting for you to dequeue. This number may grow at any
* time, so this only informs of the lower-bound of available data.
*
* You may not queue or dequeue audio on a device that is using an
* application-supplied callback; calling this function on such a device
* always returns 0. You have to use the audio callback or queue audio, but
* not both.
*
* You should not call SDL_LockAudio() on the device before querying; SDL
* handles locking internally for this function.
*
* \param dev the device ID of which we will query queued audio size
* \returns the number of bytes (not samples!) of queued audio.
*
* \since This function is available since SDL 2.0.4.
*
* \sa SDL_ClearQueuedAudio
* \sa SDL_QueueAudio
* \sa SDL_DequeueAudio
*/
extern DECLSPEC Uint32 SDLCALL SDL_GetQueuedAudioSize(SDL_AudioDeviceID dev);
/**
* Drop any queued audio data waiting to be sent to the hardware.
*
* Immediately after this call, SDL_GetQueuedAudioSize() will return 0. For
* output devices, the hardware will start playing silence if more audio isn't
* queued. For capture devices, the hardware will start filling the empty
* queue with new data if the capture device isn't paused.
*
* This will not prevent playback of queued audio that's already been sent to
 * the hardware, as we cannot undo that, so expect there to be some fraction
* of a second of audio that might still be heard. This can be useful if you
* want to, say, drop any pending music or any unprocessed microphone input
* during a level change in your game.
*
* You may not queue or dequeue audio on a device that is using an
 * application-supplied callback; calling this function on such a device is a
 * no-op. You have to use the audio callback or queue audio, but not both.
*
* You should not call SDL_LockAudio() on the device before clearing the
* queue; SDL handles locking internally for this function.
*
* This function always succeeds and thus returns void.
*
* \param dev the device ID of which to clear the audio queue
*
* \since This function is available since SDL 2.0.4.
*
* \sa SDL_GetQueuedAudioSize
* \sa SDL_QueueAudio
* \sa SDL_DequeueAudio
*/
extern DECLSPEC void SDLCALL SDL_ClearQueuedAudio(SDL_AudioDeviceID dev);
/**
* \name Audio lock functions
*
* The lock manipulated by these functions protects the callback function.
* During a SDL_LockAudio()/SDL_UnlockAudio() pair, you can be guaranteed that
* the callback function is not running. Do not call these from the callback
* function or you will cause deadlock.
*/
/* @{ */
/**
* This function is a legacy means of locking the audio device.
*
* New programs might want to use SDL_LockAudioDevice() instead. This function
* is equivalent to calling...
*
* ```c
* SDL_LockAudioDevice(1);
* ```
*
* ...and is only useful if you used the legacy SDL_OpenAudio() function.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_LockAudioDevice
* \sa SDL_UnlockAudio
* \sa SDL_UnlockAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_LockAudio(void);
/**
* Use this function to lock out the audio callback function for a specified
* device.
*
* The lock manipulated by these functions protects the audio callback
* function specified in SDL_OpenAudioDevice(). During a
* SDL_LockAudioDevice()/SDL_UnlockAudioDevice() pair, you can be guaranteed
* that the callback function for that device is not running, even if the
* device is not paused. While a device is locked, any other unpaused,
* unlocked devices may still run their callbacks.
*
* Calling this function from inside your audio callback is unnecessary. SDL
* obtains this lock before calling your function, and releases it when the
* function returns.
*
* You should not hold the lock longer than absolutely necessary. If you hold
* it too long, you'll experience dropouts in your audio playback. Ideally,
* your application locks the device, sets a few variables and unlocks again.
* Do not do heavy work while holding the lock for a device.
*
* It is safe to lock the audio device multiple times, as long as you unlock
* it an equivalent number of times. The callback will not run until the
* device has been unlocked completely in this way. If your application fails
* to unlock the device appropriately, your callback will never run, you might
* hear repeating bursts of audio, and SDL_CloseAudioDevice() will probably
* deadlock.
*
* Internally, the audio device lock is a mutex; if you lock from two threads
* at once, not only will you block the audio callback, you'll block the other
* thread.
*
* \param dev the ID of the device to be locked
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_UnlockAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_LockAudioDevice(SDL_AudioDeviceID dev);
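/*
 * Editor's example (a minimal sketch): the "lock, set a few variables,
 * unlock" pattern described above. `MixerState` is a placeholder for state
 * shared with the audio callback.
 *
 * ```c
 * #include "SDL.h"
 *
 * typedef struct { int volume; SDL_bool muted; } MixerState;
 *
 * void set_volume(SDL_AudioDeviceID dev, MixerState *state, int volume)
 * {
 *     SDL_LockAudioDevice(dev);   // the callback is guaranteed not to run now
 *     state->volume = volume;     // keep this critical section short!
 *     state->muted = (volume == 0) ? SDL_TRUE : SDL_FALSE;
 *     SDL_UnlockAudioDevice(dev);
 * }
 * ```
 */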
/**
* This function is a legacy means of unlocking the audio device.
*
* New programs might want to use SDL_UnlockAudioDevice() instead. This
* function is equivalent to calling...
*
* ```c
* SDL_UnlockAudioDevice(1);
* ```
*
* ...and is only useful if you used the legacy SDL_OpenAudio() function.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_LockAudio
* \sa SDL_UnlockAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_UnlockAudio(void);
/**
* Use this function to unlock the audio callback function for a specified
* device.
*
* This function should be paired with a previous SDL_LockAudioDevice() call.
*
* \param dev the ID of the device to be unlocked
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_LockAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_UnlockAudioDevice(SDL_AudioDeviceID dev);
/* @} *//* Audio lock functions */
/**
* This function is a legacy means of closing the audio device.
*
* This function is equivalent to calling...
*
* ```c
* SDL_CloseAudioDevice(1);
* ```
*
* ...and is only useful if you used the legacy SDL_OpenAudio() function.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_OpenAudio
*/
extern DECLSPEC void SDLCALL SDL_CloseAudio(void);
/**
* Use this function to shut down audio processing and close the audio device.
*
* The application should close open audio devices once they are no longer
* needed. Calling this function will wait until the device's audio callback
* is not running, release the audio hardware and then clean up internal
* state. No further audio will play from this device once this function
* returns.
*
* This function may block briefly while pending audio data is played by the
* hardware, so that applications don't drop the last buffer of data they
* supplied.
*
* The device ID is invalid as soon as the device is closed, and is eligible
* for reuse in a new SDL_OpenAudioDevice() call immediately.
*
* \param dev an audio device previously opened with SDL_OpenAudioDevice()
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_OpenAudioDevice
*/
extern DECLSPEC void SDLCALL SDL_CloseAudioDevice(SDL_AudioDeviceID dev);
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_audio_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_opengles2_gl2platform.h | #ifndef __gl2platform_h_
#define __gl2platform_h_
/*
** Copyright 2017-2020 The Khronos Group Inc.
** SPDX-License-Identifier: Apache-2.0
*/
/* Platform-specific types and definitions for OpenGL ES 2.X gl2.h
*
* Adopters may modify khrplatform.h and this file to suit their platform.
* Please contribute modifications back to Khronos as pull requests on the
* public github repository:
* https://github.com/KhronosGroup/OpenGL-Registry
*/
/*#include <KHR/khrplatform.h>*/
#ifndef GL_APICALL
#define GL_APICALL KHRONOS_APICALL
#endif
#ifndef GL_APIENTRY
#define GL_APIENTRY KHRONOS_APIENTRY
#endif
#endif /* __gl2platform_h_ */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_error.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_error.h
*
* Simple error message routines for SDL.
*/
#ifndef SDL_error_h_
#define SDL_error_h_
#include "SDL_stdinc.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/* Public functions */
/**
* Set the SDL error message for the current thread.
*
* Calling this function will replace any previous error message that was set.
*
 * This function always returns -1, since SDL frequently uses -1 to signify a
* failing result, leading to this idiom:
*
* ```c
* if (error_code) {
* return SDL_SetError("This operation has failed: %d", error_code);
* }
* ```
*
* \param fmt a printf()-style message format string
* \param ... additional parameters matching % tokens in the `fmt` string, if
* any
* \returns always -1.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_ClearError
* \sa SDL_GetError
*/
extern DECLSPEC int SDLCALL SDL_SetError(SDL_PRINTF_FORMAT_STRING const char *fmt, ...) SDL_PRINTF_VARARG_FUNC(1);
/**
* Retrieve a message about the last error that occurred on the current
* thread.
*
* It is possible for multiple errors to occur before calling SDL_GetError().
* Only the last error is returned.
*
* The message is only applicable when an SDL function has signaled an error.
* You must check the return values of SDL function calls to determine when to
* appropriately call SDL_GetError(). You should *not* use the results of
* SDL_GetError() to decide if an error has occurred! Sometimes SDL will set
* an error string even when reporting success.
*
* SDL will *not* clear the error string for successful API calls. You *must*
* check return values for failure cases before you can assume the error
* string applies.
*
* Error strings are set per-thread, so an error set in a different thread
* will not interfere with the current thread's operation.
*
* The returned string is internally allocated and must not be freed by the
* application.
*
* \returns a message with information about the specific error that occurred,
* or an empty string if there hasn't been an error message set since
* the last call to SDL_ClearError(). The message is only applicable
* when an SDL function has signaled an error. You must check the
* return values of SDL function calls to determine when to
* appropriately call SDL_GetError().
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_ClearError
* \sa SDL_SetError
*/
extern DECLSPEC const char *SDLCALL SDL_GetError(void);
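/*
 * Editor's example (a minimal sketch): check the return value first, and
 * consult SDL_GetError() only after a call has signaled failure, as the
 * notes above require.
 *
 * ```c
 * #include "SDL.h"
 * #include <stdio.h>
 *
 * int init_audio_or_report(void)
 * {
 *     if (SDL_Init(SDL_INIT_AUDIO) != 0) {  // the return value signals failure
 *         fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
 *         return -1;
 *     }
 *     return 0;  // success: do not consult SDL_GetError() here
 * }
 * ```
 */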
/**
* Get the last error message that was set for the current thread.
*
* This allows the caller to copy the error string into a provided buffer, but
* otherwise operates exactly the same as SDL_GetError().
*
* \param errstr A buffer to fill with the last error message that was set for
* the current thread
* \param maxlen The size of the buffer pointed to by the errstr parameter
* \returns the pointer passed in as the `errstr` parameter.
*
* \since This function is available since SDL 2.0.14.
*
* \sa SDL_GetError
*/
extern DECLSPEC char * SDLCALL SDL_GetErrorMsg(char *errstr, int maxlen);
/**
* Clear any previous error message for this thread.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetError
* \sa SDL_SetError
*/
extern DECLSPEC void SDLCALL SDL_ClearError(void);
/**
* \name Internal error functions
*
* \internal
* Private error reporting function - used internally.
*/
/* @{ */
#define SDL_OutOfMemory() SDL_Error(SDL_ENOMEM)
#define SDL_Unsupported() SDL_Error(SDL_UNSUPPORTED)
#define SDL_InvalidParamError(param) SDL_SetError("Parameter '%s' is invalid", (param))
typedef enum
{
SDL_ENOMEM,
SDL_EFREAD,
SDL_EFWRITE,
SDL_EFSEEK,
SDL_UNSUPPORTED,
SDL_LASTERROR
} SDL_errorcode;
/* SDL_Error() unconditionally returns -1. */
extern DECLSPEC int SDLCALL SDL_Error(SDL_errorcode code);
/* @} *//* Internal error functions */
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_error_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_opengles2_gl2ext.h | #ifndef __gles2_gl2ext_h_
#define __gles2_gl2ext_h_ 1
#ifdef __cplusplus
extern "C" {
#endif
/*
** Copyright 2013-2020 The Khronos Group Inc.
** SPDX-License-Identifier: MIT
**
** This header is generated from the Khronos OpenGL / OpenGL ES XML
** API Registry. The current version of the Registry, generator scripts
** used to make the header, and the header can be found at
** https://github.com/KhronosGroup/OpenGL-Registry
*/
#ifndef GL_APIENTRYP
#define GL_APIENTRYP GL_APIENTRY*
#endif
/* Generated on date 20220530 */
/* Generated C header for:
* API: gles2
* Profile: common
* Versions considered: 2\.[0-9]
* Versions emitted: _nomatch_^
* Default extensions included: gles2
* Additional extensions included: _nomatch_^
* Extensions removed: _nomatch_^
*/
#ifndef GL_KHR_blend_equation_advanced
#define GL_KHR_blend_equation_advanced 1
#define GL_MULTIPLY_KHR 0x9294
#define GL_SCREEN_KHR 0x9295
#define GL_OVERLAY_KHR 0x9296
#define GL_DARKEN_KHR 0x9297
#define GL_LIGHTEN_KHR 0x9298
#define GL_COLORDODGE_KHR 0x9299
#define GL_COLORBURN_KHR 0x929A
#define GL_HARDLIGHT_KHR 0x929B
#define GL_SOFTLIGHT_KHR 0x929C
#define GL_DIFFERENCE_KHR 0x929E
#define GL_EXCLUSION_KHR 0x92A0
#define GL_HSL_HUE_KHR 0x92AD
#define GL_HSL_SATURATION_KHR 0x92AE
#define GL_HSL_COLOR_KHR 0x92AF
#define GL_HSL_LUMINOSITY_KHR 0x92B0
typedef void (GL_APIENTRYP PFNGLBLENDBARRIERKHRPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBlendBarrierKHR (void);
#endif
#endif /* GL_KHR_blend_equation_advanced */
#ifndef GL_KHR_blend_equation_advanced_coherent
#define GL_KHR_blend_equation_advanced_coherent 1
#define GL_BLEND_ADVANCED_COHERENT_KHR 0x9285
#endif /* GL_KHR_blend_equation_advanced_coherent */
#ifndef GL_KHR_context_flush_control
#define GL_KHR_context_flush_control 1
#define GL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x82FB
#define GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x82FC
#endif /* GL_KHR_context_flush_control */
#ifndef GL_KHR_debug
#define GL_KHR_debug 1
typedef void (GL_APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
#define GL_SAMPLER 0x82E6
#define GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR 0x8242
#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR 0x8243
#define GL_DEBUG_CALLBACK_FUNCTION_KHR 0x8244
#define GL_DEBUG_CALLBACK_USER_PARAM_KHR 0x8245
#define GL_DEBUG_SOURCE_API_KHR 0x8246
#define GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR 0x8247
#define GL_DEBUG_SOURCE_SHADER_COMPILER_KHR 0x8248
#define GL_DEBUG_SOURCE_THIRD_PARTY_KHR 0x8249
#define GL_DEBUG_SOURCE_APPLICATION_KHR 0x824A
#define GL_DEBUG_SOURCE_OTHER_KHR 0x824B
#define GL_DEBUG_TYPE_ERROR_KHR 0x824C
#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR 0x824D
#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR 0x824E
#define GL_DEBUG_TYPE_PORTABILITY_KHR 0x824F
#define GL_DEBUG_TYPE_PERFORMANCE_KHR 0x8250
#define GL_DEBUG_TYPE_OTHER_KHR 0x8251
#define GL_DEBUG_TYPE_MARKER_KHR 0x8268
#define GL_DEBUG_TYPE_PUSH_GROUP_KHR 0x8269
#define GL_DEBUG_TYPE_POP_GROUP_KHR 0x826A
#define GL_DEBUG_SEVERITY_NOTIFICATION_KHR 0x826B
#define GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR 0x826C
#define GL_DEBUG_GROUP_STACK_DEPTH_KHR 0x826D
#define GL_BUFFER_KHR 0x82E0
#define GL_SHADER_KHR 0x82E1
#define GL_PROGRAM_KHR 0x82E2
#define GL_VERTEX_ARRAY_KHR 0x8074
#define GL_QUERY_KHR 0x82E3
#define GL_PROGRAM_PIPELINE_KHR 0x82E4
#define GL_SAMPLER_KHR 0x82E6
#define GL_MAX_LABEL_LENGTH_KHR 0x82E8
#define GL_MAX_DEBUG_MESSAGE_LENGTH_KHR 0x9143
#define GL_MAX_DEBUG_LOGGED_MESSAGES_KHR 0x9144
#define GL_DEBUG_LOGGED_MESSAGES_KHR 0x9145
#define GL_DEBUG_SEVERITY_HIGH_KHR 0x9146
#define GL_DEBUG_SEVERITY_MEDIUM_KHR 0x9147
#define GL_DEBUG_SEVERITY_LOW_KHR 0x9148
#define GL_DEBUG_OUTPUT_KHR 0x92E0
#define GL_CONTEXT_FLAG_DEBUG_BIT_KHR 0x00000002
#define GL_STACK_OVERFLOW_KHR 0x0503
#define GL_STACK_UNDERFLOW_KHR 0x0504
typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECONTROLKHRPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGEINSERTKHRPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECALLBACKKHRPROC) (GLDEBUGPROCKHR callback, const void *userParam);
typedef GLuint (GL_APIENTRYP PFNGLGETDEBUGMESSAGELOGKHRPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
typedef void (GL_APIENTRYP PFNGLPUSHDEBUGGROUPKHRPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message);
typedef void (GL_APIENTRYP PFNGLPOPDEBUGGROUPKHRPROC) (void);
typedef void (GL_APIENTRYP PFNGLOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
typedef void (GL_APIENTRYP PFNGLOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei length, const GLchar *label);
typedef void (GL_APIENTRYP PFNGLGETOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
typedef void (GL_APIENTRYP PFNGLGETPOINTERVKHRPROC) (GLenum pname, void **params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDebugMessageControlKHR (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
GL_APICALL void GL_APIENTRY glDebugMessageInsertKHR (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
GL_APICALL void GL_APIENTRY glDebugMessageCallbackKHR (GLDEBUGPROCKHR callback, const void *userParam);
GL_APICALL GLuint GL_APIENTRY glGetDebugMessageLogKHR (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
GL_APICALL void GL_APIENTRY glPushDebugGroupKHR (GLenum source, GLuint id, GLsizei length, const GLchar *message);
GL_APICALL void GL_APIENTRY glPopDebugGroupKHR (void);
GL_APICALL void GL_APIENTRY glObjectLabelKHR (GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
GL_APICALL void GL_APIENTRY glGetObjectLabelKHR (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
GL_APICALL void GL_APIENTRY glObjectPtrLabelKHR (const void *ptr, GLsizei length, const GLchar *label);
GL_APICALL void GL_APIENTRY glGetObjectPtrLabelKHR (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
GL_APICALL void GL_APIENTRY glGetPointervKHR (GLenum pname, void **params);
#endif
#endif /* GL_KHR_debug */
#ifndef GL_KHR_no_error
#define GL_KHR_no_error 1
#define GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR 0x00000008
#endif /* GL_KHR_no_error */
#ifndef GL_KHR_parallel_shader_compile
#define GL_KHR_parallel_shader_compile 1
#define GL_MAX_SHADER_COMPILER_THREADS_KHR 0x91B0
#define GL_COMPLETION_STATUS_KHR 0x91B1
typedef void (GL_APIENTRYP PFNGLMAXSHADERCOMPILERTHREADSKHRPROC) (GLuint count);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glMaxShaderCompilerThreadsKHR (GLuint count);
#endif
#endif /* GL_KHR_parallel_shader_compile */
#ifndef GL_KHR_robust_buffer_access_behavior
#define GL_KHR_robust_buffer_access_behavior 1
#endif /* GL_KHR_robust_buffer_access_behavior */
#ifndef GL_KHR_robustness
#define GL_KHR_robustness 1
#define GL_CONTEXT_ROBUST_ACCESS_KHR 0x90F3
#define GL_LOSE_CONTEXT_ON_RESET_KHR 0x8252
#define GL_GUILTY_CONTEXT_RESET_KHR 0x8253
#define GL_INNOCENT_CONTEXT_RESET_KHR 0x8254
#define GL_UNKNOWN_CONTEXT_RESET_KHR 0x8255
#define GL_RESET_NOTIFICATION_STRATEGY_KHR 0x8256
#define GL_NO_RESET_NOTIFICATION_KHR 0x8261
#define GL_CONTEXT_LOST_KHR 0x0507
typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSKHRPROC) (void);
typedef void (GL_APIENTRYP PFNGLREADNPIXELSKHRPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETNUNIFORMUIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void);
GL_APICALL void GL_APIENTRY glReadnPixelsKHR (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
GL_APICALL void GL_APIENTRY glGetnUniformfvKHR (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
GL_APICALL void GL_APIENTRY glGetnUniformivKHR (GLuint program, GLint location, GLsizei bufSize, GLint *params);
GL_APICALL void GL_APIENTRY glGetnUniformuivKHR (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
#endif
#endif /* GL_KHR_robustness */
#ifndef GL_KHR_shader_subgroup
#define GL_KHR_shader_subgroup 1
#define GL_SUBGROUP_SIZE_KHR 0x9532
#define GL_SUBGROUP_SUPPORTED_STAGES_KHR 0x9533
#define GL_SUBGROUP_SUPPORTED_FEATURES_KHR 0x9534
#define GL_SUBGROUP_QUAD_ALL_STAGES_KHR 0x9535
#define GL_SUBGROUP_FEATURE_BASIC_BIT_KHR 0x00000001
#define GL_SUBGROUP_FEATURE_VOTE_BIT_KHR 0x00000002
#define GL_SUBGROUP_FEATURE_ARITHMETIC_BIT_KHR 0x00000004
#define GL_SUBGROUP_FEATURE_BALLOT_BIT_KHR 0x00000008
#define GL_SUBGROUP_FEATURE_SHUFFLE_BIT_KHR 0x00000010
#define GL_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT_KHR 0x00000020
#define GL_SUBGROUP_FEATURE_CLUSTERED_BIT_KHR 0x00000040
#define GL_SUBGROUP_FEATURE_QUAD_BIT_KHR 0x00000080
#endif /* GL_KHR_shader_subgroup */
#ifndef GL_KHR_texture_compression_astc_hdr
#define GL_KHR_texture_compression_astc_hdr 1
#define GL_COMPRESSED_RGBA_ASTC_4x4_KHR 0x93B0
#define GL_COMPRESSED_RGBA_ASTC_5x4_KHR 0x93B1
#define GL_COMPRESSED_RGBA_ASTC_5x5_KHR 0x93B2
#define GL_COMPRESSED_RGBA_ASTC_6x5_KHR 0x93B3
#define GL_COMPRESSED_RGBA_ASTC_6x6_KHR 0x93B4
#define GL_COMPRESSED_RGBA_ASTC_8x5_KHR 0x93B5
#define GL_COMPRESSED_RGBA_ASTC_8x6_KHR 0x93B6
#define GL_COMPRESSED_RGBA_ASTC_8x8_KHR 0x93B7
#define GL_COMPRESSED_RGBA_ASTC_10x5_KHR 0x93B8
#define GL_COMPRESSED_RGBA_ASTC_10x6_KHR 0x93B9
#define GL_COMPRESSED_RGBA_ASTC_10x8_KHR 0x93BA
#define GL_COMPRESSED_RGBA_ASTC_10x10_KHR 0x93BB
#define GL_COMPRESSED_RGBA_ASTC_12x10_KHR 0x93BC
#define GL_COMPRESSED_RGBA_ASTC_12x12_KHR 0x93BD
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR 0x93D0
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR 0x93D1
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR 0x93D2
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR 0x93D3
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR 0x93D4
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR 0x93D5
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR 0x93D6
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR 0x93D7
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR 0x93D8
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR 0x93D9
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR 0x93DA
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR 0x93DB
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR 0x93DC
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR 0x93DD
#endif /* GL_KHR_texture_compression_astc_hdr */
#ifndef GL_KHR_texture_compression_astc_ldr
#define GL_KHR_texture_compression_astc_ldr 1
#endif /* GL_KHR_texture_compression_astc_ldr */
#ifndef GL_KHR_texture_compression_astc_sliced_3d
#define GL_KHR_texture_compression_astc_sliced_3d 1
#endif /* GL_KHR_texture_compression_astc_sliced_3d */
#ifndef GL_OES_EGL_image
#define GL_OES_EGL_image 1
typedef void *GLeglImageOES;
typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image);
typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image);
GL_APICALL void GL_APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image);
#endif
#endif /* GL_OES_EGL_image */
#ifndef GL_OES_EGL_image_external
#define GL_OES_EGL_image_external 1
#define GL_TEXTURE_EXTERNAL_OES 0x8D65
#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67
#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68
#define GL_SAMPLER_EXTERNAL_OES 0x8D66
#endif /* GL_OES_EGL_image_external */
#ifndef GL_OES_EGL_image_external_essl3
#define GL_OES_EGL_image_external_essl3 1
#endif /* GL_OES_EGL_image_external_essl3 */
#ifndef GL_OES_compressed_ETC1_RGB8_sub_texture
#define GL_OES_compressed_ETC1_RGB8_sub_texture 1
#endif /* GL_OES_compressed_ETC1_RGB8_sub_texture */
#ifndef GL_OES_compressed_ETC1_RGB8_texture
#define GL_OES_compressed_ETC1_RGB8_texture 1
#define GL_ETC1_RGB8_OES 0x8D64
#endif /* GL_OES_compressed_ETC1_RGB8_texture */
#ifndef GL_OES_compressed_paletted_texture
#define GL_OES_compressed_paletted_texture 1
#define GL_PALETTE4_RGB8_OES 0x8B90
#define GL_PALETTE4_RGBA8_OES 0x8B91
#define GL_PALETTE4_R5_G6_B5_OES 0x8B92
#define GL_PALETTE4_RGBA4_OES 0x8B93
#define GL_PALETTE4_RGB5_A1_OES 0x8B94
#define GL_PALETTE8_RGB8_OES 0x8B95
#define GL_PALETTE8_RGBA8_OES 0x8B96
#define GL_PALETTE8_R5_G6_B5_OES 0x8B97
#define GL_PALETTE8_RGBA4_OES 0x8B98
#define GL_PALETTE8_RGB5_A1_OES 0x8B99
#endif /* GL_OES_compressed_paletted_texture */
#ifndef GL_OES_copy_image
#define GL_OES_copy_image 1
typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAOESPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCopyImageSubDataOES (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
#endif
#endif /* GL_OES_copy_image */
#ifndef GL_OES_depth24
#define GL_OES_depth24 1
#define GL_DEPTH_COMPONENT24_OES 0x81A6
#endif /* GL_OES_depth24 */
#ifndef GL_OES_depth32
#define GL_OES_depth32 1
#define GL_DEPTH_COMPONENT32_OES 0x81A7
#endif /* GL_OES_depth32 */
#ifndef GL_OES_depth_texture
#define GL_OES_depth_texture 1
#endif /* GL_OES_depth_texture */
#ifndef GL_OES_draw_buffers_indexed
#define GL_OES_draw_buffers_indexed 1
#define GL_MIN 0x8007
#define GL_MAX 0x8008
typedef void (GL_APIENTRYP PFNGLENABLEIOESPROC) (GLenum target, GLuint index);
typedef void (GL_APIENTRYP PFNGLDISABLEIOESPROC) (GLenum target, GLuint index);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIOESPROC) (GLuint buf, GLenum mode);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIOESPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCIOESPROC) (GLuint buf, GLenum src, GLenum dst);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIOESPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
typedef void (GL_APIENTRYP PFNGLCOLORMASKIOESPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIOESPROC) (GLenum target, GLuint index);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glEnableiOES (GLenum target, GLuint index);
GL_APICALL void GL_APIENTRY glDisableiOES (GLenum target, GLuint index);
GL_APICALL void GL_APIENTRY glBlendEquationiOES (GLuint buf, GLenum mode);
GL_APICALL void GL_APIENTRY glBlendEquationSeparateiOES (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
GL_APICALL void GL_APIENTRY glBlendFunciOES (GLuint buf, GLenum src, GLenum dst);
GL_APICALL void GL_APIENTRY glBlendFuncSeparateiOES (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
GL_APICALL void GL_APIENTRY glColorMaskiOES (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
GL_APICALL GLboolean GL_APIENTRY glIsEnablediOES (GLenum target, GLuint index);
#endif
#endif /* GL_OES_draw_buffers_indexed */
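/*
 * Usage sketch for GL_OES_draw_buffers_indexed (illustrative; assumes a
 * framebuffer with at least two color attachments is bound): enable
 * additive blending on attachment 1 while leaving attachment 0 opaque.
 */
#if 0
static void blend_only_attachment1(void)
{
    glDisableiOES(GL_BLEND, 0);
    glEnableiOES(GL_BLEND, 1);
    glBlendEquationiOES(1, GL_FUNC_ADD);
    glBlendFunciOES(1, GL_ONE, GL_ONE);
}
#endif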
#ifndef GL_OES_draw_elements_base_vertex
#define GL_OES_draw_elements_base_vertex 1
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount, const GLint *basevertex);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexOES (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
GL_APICALL void GL_APIENTRY glMultiDrawElementsBaseVertexEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount, const GLint *basevertex);
#endif
#endif /* GL_OES_draw_elements_base_vertex */
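/*
 * Usage sketch for GL_OES_draw_elements_base_vertex (illustrative;
 * assumes an element array buffer with GL_UNSIGNED_SHORT indices is
 * bound). Each fetched index is biased by basevertex before attribute
 * lookup, so several meshes can share one index buffer starting at 0.
 */
#if 0
static void draw_submesh(GLsizei index_count, GLint base_vertex)
{
    glDrawElementsBaseVertexOES(GL_TRIANGLES, index_count,
                                GL_UNSIGNED_SHORT, (const void *)0,
                                base_vertex);
}
#endif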
#ifndef GL_OES_element_index_uint
#define GL_OES_element_index_uint 1
#endif /* GL_OES_element_index_uint */
#ifndef GL_OES_fbo_render_mipmap
#define GL_OES_fbo_render_mipmap 1
#endif /* GL_OES_fbo_render_mipmap */
#ifndef GL_OES_fragment_precision_high
#define GL_OES_fragment_precision_high 1
#endif /* GL_OES_fragment_precision_high */
#ifndef GL_OES_geometry_point_size
#define GL_OES_geometry_point_size 1
#endif /* GL_OES_geometry_point_size */
#ifndef GL_OES_geometry_shader
#define GL_OES_geometry_shader 1
#define GL_GEOMETRY_SHADER_OES 0x8DD9
#define GL_GEOMETRY_SHADER_BIT_OES 0x00000004
#define GL_GEOMETRY_LINKED_VERTICES_OUT_OES 0x8916
#define GL_GEOMETRY_LINKED_INPUT_TYPE_OES 0x8917
#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES 0x8918
#define GL_GEOMETRY_SHADER_INVOCATIONS_OES 0x887F
#define GL_LAYER_PROVOKING_VERTEX_OES 0x825E
#define GL_LINES_ADJACENCY_OES 0x000A
#define GL_LINE_STRIP_ADJACENCY_OES 0x000B
#define GL_TRIANGLES_ADJACENCY_OES 0x000C
#define GL_TRIANGLE_STRIP_ADJACENCY_OES 0x000D
#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8DDF
#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES 0x8A2C
#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8A32
#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES 0x9123
#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES 0x9124
#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES 0x8DE0
#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES 0x8DE1
#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES 0x8E5A
#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES 0x8C29
#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES 0x92CF
#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES 0x92D5
#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES 0x90CD
#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES 0x90D7
#define GL_FIRST_VERTEX_CONVENTION_OES 0x8E4D
#define GL_LAST_VERTEX_CONVENTION_OES 0x8E4E
#define GL_UNDEFINED_VERTEX_OES 0x8260
#define GL_PRIMITIVES_GENERATED_OES 0x8C87
#define GL_FRAMEBUFFER_DEFAULT_LAYERS_OES 0x9312
#define GL_MAX_FRAMEBUFFER_LAYERS_OES 0x9317
#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES 0x8DA8
#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES 0x8DA7
#define GL_REFERENCED_BY_GEOMETRY_SHADER_OES 0x9309
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREOESPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferTextureOES (GLenum target, GLenum attachment, GLuint texture, GLint level);
#endif
#endif /* GL_OES_geometry_shader */
#ifndef GL_OES_get_program_binary
#define GL_OES_get_program_binary 1
#define GL_PROGRAM_BINARY_LENGTH_OES 0x8741
#define GL_NUM_PROGRAM_BINARY_FORMATS_OES 0x87FE
#define GL_PROGRAM_BINARY_FORMATS_OES 0x87FF
typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYOESPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYOESPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLint length);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetProgramBinaryOES (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
GL_APICALL void GL_APIENTRY glProgramBinaryOES (GLuint program, GLenum binaryFormat, const void *binary, GLint length);
#endif
#endif /* GL_OES_get_program_binary */
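/*
 * Usage sketch for GL_OES_get_program_binary (illustrative; assumes a
 * successfully linked program and <stdlib.h> for malloc/NULL). The blob
 * plus the returned binaryFormat can be persisted and later reloaded
 * with glProgramBinaryOES to skip recompilation.
 */
#if 0
static void *save_binary(GLuint program, GLsizei *out_len, GLenum *out_fmt)
{
    GLint len = 0;
    void *blob;
    glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH_OES, &len);
    if (len <= 0)
        return NULL;
    blob = malloc((size_t)len);
    if (blob)
        glGetProgramBinaryOES(program, len, out_len, out_fmt, blob);
    return blob;
}
#endif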
#ifndef GL_OES_gpu_shader5
#define GL_OES_gpu_shader5 1
#endif /* GL_OES_gpu_shader5 */
#ifndef GL_OES_mapbuffer
#define GL_OES_mapbuffer 1
#define GL_WRITE_ONLY_OES 0x88B9
#define GL_BUFFER_ACCESS_OES 0x88BB
#define GL_BUFFER_MAPPED_OES 0x88BC
#define GL_BUFFER_MAP_POINTER_OES 0x88BD
typedef void *(GL_APIENTRYP PFNGLMAPBUFFEROESPROC) (GLenum target, GLenum access);
typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFEROESPROC) (GLenum target);
typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVOESPROC) (GLenum target, GLenum pname, void **params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void *GL_APIENTRY glMapBufferOES (GLenum target, GLenum access);
GL_APICALL GLboolean GL_APIENTRY glUnmapBufferOES (GLenum target);
GL_APICALL void GL_APIENTRY glGetBufferPointervOES (GLenum target, GLenum pname, void **params);
#endif
#endif /* GL_OES_mapbuffer */
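/*
 * Usage sketch for GL_OES_mapbuffer (illustrative; assumes a buffer with
 * storage allocated via glBufferData is bound to GL_ARRAY_BUFFER and
 * <string.h> is available for memcpy).
 */
#if 0
static int upload_via_map(GLsizeiptr size, const void *src)
{
    void *dst = glMapBufferOES(GL_ARRAY_BUFFER, GL_WRITE_ONLY_OES);
    if (!dst)
        return 0;
    memcpy(dst, src, (size_t)size);
    /* GL_FALSE here means the data store was lost while mapped */
    return glUnmapBufferOES(GL_ARRAY_BUFFER) == GL_TRUE;
}
#endif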
#ifndef GL_OES_packed_depth_stencil
#define GL_OES_packed_depth_stencil 1
#define GL_DEPTH_STENCIL_OES 0x84F9
#define GL_UNSIGNED_INT_24_8_OES 0x84FA
#define GL_DEPTH24_STENCIL8_OES 0x88F0
#endif /* GL_OES_packed_depth_stencil */
#ifndef GL_OES_primitive_bounding_box
#define GL_OES_primitive_bounding_box 1
#define GL_PRIMITIVE_BOUNDING_BOX_OES 0x92BE
typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXOESPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxOES (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
#endif
#endif /* GL_OES_primitive_bounding_box */
#ifndef GL_OES_required_internalformat
#define GL_OES_required_internalformat 1
#define GL_ALPHA8_OES 0x803C
#define GL_DEPTH_COMPONENT16_OES 0x81A5
#define GL_LUMINANCE4_ALPHA4_OES 0x8043
#define GL_LUMINANCE8_ALPHA8_OES 0x8045
#define GL_LUMINANCE8_OES 0x8040
#define GL_RGBA4_OES 0x8056
#define GL_RGB5_A1_OES 0x8057
#define GL_RGB565_OES 0x8D62
#define GL_RGB8_OES 0x8051
#define GL_RGBA8_OES 0x8058
#define GL_RGB10_EXT 0x8052
#define GL_RGB10_A2_EXT 0x8059
#endif /* GL_OES_required_internalformat */
#ifndef GL_OES_rgb8_rgba8
#define GL_OES_rgb8_rgba8 1
#endif /* GL_OES_rgb8_rgba8 */
#ifndef GL_OES_sample_shading
#define GL_OES_sample_shading 1
#define GL_SAMPLE_SHADING_OES 0x8C36
#define GL_MIN_SAMPLE_SHADING_VALUE_OES 0x8C37
typedef void (GL_APIENTRYP PFNGLMINSAMPLESHADINGOESPROC) (GLfloat value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glMinSampleShadingOES (GLfloat value);
#endif
#endif /* GL_OES_sample_shading */
#ifndef GL_OES_sample_variables
#define GL_OES_sample_variables 1
#endif /* GL_OES_sample_variables */
#ifndef GL_OES_shader_image_atomic
#define GL_OES_shader_image_atomic 1
#endif /* GL_OES_shader_image_atomic */
#ifndef GL_OES_shader_io_blocks
#define GL_OES_shader_io_blocks 1
#endif /* GL_OES_shader_io_blocks */
#ifndef GL_OES_shader_multisample_interpolation
#define GL_OES_shader_multisample_interpolation 1
#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5B
#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5C
#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS_OES 0x8E5D
#endif /* GL_OES_shader_multisample_interpolation */
#ifndef GL_OES_standard_derivatives
#define GL_OES_standard_derivatives 1
#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES 0x8B8B
#endif /* GL_OES_standard_derivatives */
#ifndef GL_OES_stencil1
#define GL_OES_stencil1 1
#define GL_STENCIL_INDEX1_OES 0x8D46
#endif /* GL_OES_stencil1 */
#ifndef GL_OES_stencil4
#define GL_OES_stencil4 1
#define GL_STENCIL_INDEX4_OES 0x8D47
#endif /* GL_OES_stencil4 */
#ifndef GL_OES_surfaceless_context
#define GL_OES_surfaceless_context 1
#define GL_FRAMEBUFFER_UNDEFINED_OES 0x8219
#endif /* GL_OES_surfaceless_context */
#ifndef GL_OES_tessellation_point_size
#define GL_OES_tessellation_point_size 1
#endif /* GL_OES_tessellation_point_size */
#ifndef GL_OES_tessellation_shader
#define GL_OES_tessellation_shader 1
#define GL_PATCHES_OES 0x000E
#define GL_PATCH_VERTICES_OES 0x8E72
#define GL_TESS_CONTROL_OUTPUT_VERTICES_OES 0x8E75
#define GL_TESS_GEN_MODE_OES 0x8E76
#define GL_TESS_GEN_SPACING_OES 0x8E77
#define GL_TESS_GEN_VERTEX_ORDER_OES 0x8E78
#define GL_TESS_GEN_POINT_MODE_OES 0x8E79
#define GL_ISOLINES_OES 0x8E7A
#define GL_QUADS_OES 0x0007
#define GL_FRACTIONAL_ODD_OES 0x8E7B
#define GL_FRACTIONAL_EVEN_OES 0x8E7C
#define GL_MAX_PATCH_VERTICES_OES 0x8E7D
#define GL_MAX_TESS_GEN_LEVEL_OES 0x8E7E
#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E7F
#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E80
#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES 0x8E81
#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES 0x8E82
#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES 0x8E83
#define GL_MAX_TESS_PATCH_COMPONENTS_OES 0x8E84
#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES 0x8E85
#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES 0x8E86
#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES 0x8E89
#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES 0x8E8A
#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES 0x886C
#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES 0x886D
#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E1E
#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E1F
#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES 0x92CD
#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES 0x92CE
#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES 0x92D3
#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES 0x92D4
#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES 0x90CB
#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES 0x90CC
#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES 0x90D8
#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES 0x90D9
#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES 0x8221
#define GL_IS_PER_PATCH_OES 0x92E7
#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES 0x9307
#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES 0x9308
#define GL_TESS_CONTROL_SHADER_OES 0x8E88
#define GL_TESS_EVALUATION_SHADER_OES 0x8E87
#define GL_TESS_CONTROL_SHADER_BIT_OES 0x00000008
#define GL_TESS_EVALUATION_SHADER_BIT_OES 0x00000010
typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIOESPROC) (GLenum pname, GLint value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPatchParameteriOES (GLenum pname, GLint value);
#endif
#endif /* GL_OES_tessellation_shader */
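/*
 * Usage sketch for GL_OES_tessellation_shader (illustrative; assumes a
 * program with tessellation control/evaluation stages is bound, in which
 * case GL_PATCHES_OES is the only valid draw primitive).
 */
#if 0
static void draw_patches(GLint verts_per_patch, GLsizei patch_count)
{
    glPatchParameteriOES(GL_PATCH_VERTICES_OES, verts_per_patch);
    glDrawArrays(GL_PATCHES_OES, 0, verts_per_patch * patch_count);
}
#endif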
#ifndef GL_OES_texture_3D
#define GL_OES_texture_3D 1
#define GL_TEXTURE_WRAP_R_OES 0x8072
#define GL_TEXTURE_3D_OES 0x806F
#define GL_TEXTURE_BINDING_3D_OES 0x806A
#define GL_MAX_3D_TEXTURE_SIZE_OES 0x8073
#define GL_SAMPLER_3D_OES 0x8B5F
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES 0x8CD4
typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DOESPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
GL_APICALL void GL_APIENTRY glTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
GL_APICALL void GL_APIENTRY glCopyTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glCompressedTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
GL_APICALL void GL_APIENTRY glCompressedTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
GL_APICALL void GL_APIENTRY glFramebufferTexture3DOES (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
#endif
#endif /* GL_OES_texture_3D */
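/*
 * Usage sketch for GL_OES_texture_3D (illustrative; assumes a texture is
 * bound to GL_TEXTURE_3D_OES and texels points at w*h*d RGBA8 texels).
 */
#if 0
static void upload_volume(GLsizei w, GLsizei h, GLsizei d, const void *texels)
{
    glTexImage3DOES(GL_TEXTURE_3D_OES, 0, GL_RGBA, w, h, d, 0,
                    GL_RGBA, GL_UNSIGNED_BYTE, texels);
    /* The extension also adds a wrap mode for the depth (R) axis */
    glTexParameteri(GL_TEXTURE_3D_OES, GL_TEXTURE_WRAP_R_OES, GL_CLAMP_TO_EDGE);
}
#endif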
#ifndef GL_OES_texture_border_clamp
#define GL_OES_texture_border_clamp 1
#define GL_TEXTURE_BORDER_COLOR_OES 0x1004
#define GL_CLAMP_TO_BORDER_OES 0x812D
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, const GLint *params);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, const GLuint *params);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, GLuint *params);
typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, const GLint *param);
typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, const GLuint *param);
typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, GLuint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexParameterIivOES (GLenum target, GLenum pname, const GLint *params);
GL_APICALL void GL_APIENTRY glTexParameterIuivOES (GLenum target, GLenum pname, const GLuint *params);
GL_APICALL void GL_APIENTRY glGetTexParameterIivOES (GLenum target, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetTexParameterIuivOES (GLenum target, GLenum pname, GLuint *params);
GL_APICALL void GL_APIENTRY glSamplerParameterIivOES (GLuint sampler, GLenum pname, const GLint *param);
GL_APICALL void GL_APIENTRY glSamplerParameterIuivOES (GLuint sampler, GLenum pname, const GLuint *param);
GL_APICALL void GL_APIENTRY glGetSamplerParameterIivOES (GLuint sampler, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivOES (GLuint sampler, GLenum pname, GLuint *params);
#endif
#endif /* GL_OES_texture_border_clamp */
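/*
 * Usage sketch for GL_OES_texture_border_clamp (illustrative; assumes a
 * texture is bound to GL_TEXTURE_2D). Samples outside [0,1] return the
 * border color instead of the nearest edge texel.
 */
#if 0
static void clamp_to_transparent_border(void)
{
    static const GLfloat border[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER_OES);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER_OES);
    glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR_OES, border);
}
#endif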
#ifndef GL_OES_texture_buffer
#define GL_OES_texture_buffer 1
#define GL_TEXTURE_BUFFER_OES 0x8C2A
#define GL_TEXTURE_BUFFER_BINDING_OES 0x8C2A
#define GL_MAX_TEXTURE_BUFFER_SIZE_OES 0x8C2B
#define GL_TEXTURE_BINDING_BUFFER_OES 0x8C2C
#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_OES 0x8C2D
#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_OES 0x919F
#define GL_SAMPLER_BUFFER_OES 0x8DC2
#define GL_INT_SAMPLER_BUFFER_OES 0x8DD0
#define GL_UNSIGNED_INT_SAMPLER_BUFFER_OES 0x8DD8
#define GL_IMAGE_BUFFER_OES 0x9051
#define GL_INT_IMAGE_BUFFER_OES 0x905C
#define GL_UNSIGNED_INT_IMAGE_BUFFER_OES 0x9067
#define GL_TEXTURE_BUFFER_OFFSET_OES 0x919D
#define GL_TEXTURE_BUFFER_SIZE_OES 0x919E
typedef void (GL_APIENTRYP PFNGLTEXBUFFEROESPROC) (GLenum target, GLenum internalformat, GLuint buffer);
typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEOESPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexBufferOES (GLenum target, GLenum internalformat, GLuint buffer);
GL_APICALL void GL_APIENTRY glTexBufferRangeOES (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
#endif
#endif /* GL_OES_texture_buffer */
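/*
 * Usage sketch for GL_OES_texture_buffer (illustrative; GL_RGBA32F is an
 * assumption taken from the ES 3.0 headers, and buf is a hypothetical
 * buffer object whose data store holds tightly packed vec4 texels).
 */
#if 0
static void attach_buffer_texture(GLuint tex, GLuint buf)
{
    glBindTexture(GL_TEXTURE_BUFFER_OES, tex);
    /* The buffer's data store becomes a large 1D array of texels */
    glTexBufferOES(GL_TEXTURE_BUFFER_OES, GL_RGBA32F, buf);
}
#endif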
#ifndef GL_OES_texture_compression_astc
#define GL_OES_texture_compression_astc 1
#define GL_COMPRESSED_RGBA_ASTC_3x3x3_OES 0x93C0
#define GL_COMPRESSED_RGBA_ASTC_4x3x3_OES 0x93C1
#define GL_COMPRESSED_RGBA_ASTC_4x4x3_OES 0x93C2
#define GL_COMPRESSED_RGBA_ASTC_4x4x4_OES 0x93C3
#define GL_COMPRESSED_RGBA_ASTC_5x4x4_OES 0x93C4
#define GL_COMPRESSED_RGBA_ASTC_5x5x4_OES 0x93C5
#define GL_COMPRESSED_RGBA_ASTC_5x5x5_OES 0x93C6
#define GL_COMPRESSED_RGBA_ASTC_6x5x5_OES 0x93C7
#define GL_COMPRESSED_RGBA_ASTC_6x6x5_OES 0x93C8
#define GL_COMPRESSED_RGBA_ASTC_6x6x6_OES 0x93C9
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES 0x93E0
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES 0x93E1
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES 0x93E2
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES 0x93E3
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES 0x93E4
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES 0x93E5
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES 0x93E6
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES 0x93E7
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES 0x93E8
#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES 0x93E9
#endif /* GL_OES_texture_compression_astc */
#ifndef GL_OES_texture_cube_map_array
#define GL_OES_texture_cube_map_array 1
#define GL_TEXTURE_CUBE_MAP_ARRAY_OES 0x9009
#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES 0x900A
#define GL_SAMPLER_CUBE_MAP_ARRAY_OES 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES 0x900D
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900E
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900F
#define GL_IMAGE_CUBE_MAP_ARRAY_OES 0x9054
#define GL_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x905F
#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x906A
#endif /* GL_OES_texture_cube_map_array */
#ifndef GL_OES_texture_float
#define GL_OES_texture_float 1
#endif /* GL_OES_texture_float */
#ifndef GL_OES_texture_float_linear
#define GL_OES_texture_float_linear 1
#endif /* GL_OES_texture_float_linear */
#ifndef GL_OES_texture_half_float
#define GL_OES_texture_half_float 1
#define GL_HALF_FLOAT_OES 0x8D61
#endif /* GL_OES_texture_half_float */
#ifndef GL_OES_texture_half_float_linear
#define GL_OES_texture_half_float_linear 1
#endif /* GL_OES_texture_half_float_linear */
#ifndef GL_OES_texture_npot
#define GL_OES_texture_npot 1
#endif /* GL_OES_texture_npot */
#ifndef GL_OES_texture_stencil8
#define GL_OES_texture_stencil8 1
#define GL_STENCIL_INDEX_OES 0x1901
#define GL_STENCIL_INDEX8_OES 0x8D48
#endif /* GL_OES_texture_stencil8 */
#ifndef GL_OES_texture_storage_multisample_2d_array
#define GL_OES_texture_storage_multisample_2d_array 1
#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES 0x9102
#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES 0x9105
#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910B
#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910C
#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910D
typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEOESPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexStorage3DMultisampleOES (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
#endif
#endif /* GL_OES_texture_storage_multisample_2d_array */
#ifndef GL_OES_texture_view
#define GL_OES_texture_view 1
#define GL_TEXTURE_VIEW_MIN_LEVEL_OES 0x82DB
#define GL_TEXTURE_VIEW_NUM_LEVELS_OES 0x82DC
#define GL_TEXTURE_VIEW_MIN_LAYER_OES 0x82DD
#define GL_TEXTURE_VIEW_NUM_LAYERS_OES 0x82DE
#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF
typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWOESPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTextureViewOES (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
#endif
#endif /* GL_OES_texture_view */
#ifndef GL_OES_vertex_array_object
#define GL_OES_vertex_array_object 1
#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5
typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYOESPROC) (GLuint array);
typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSOESPROC) (GLsizei n, const GLuint *arrays);
typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSOESPROC) (GLsizei n, GLuint *arrays);
typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYOESPROC) (GLuint array);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBindVertexArrayOES (GLuint array);
GL_APICALL void GL_APIENTRY glDeleteVertexArraysOES (GLsizei n, const GLuint *arrays);
GL_APICALL void GL_APIENTRY glGenVertexArraysOES (GLsizei n, GLuint *arrays);
GL_APICALL GLboolean GL_APIENTRY glIsVertexArrayOES (GLuint array);
#endif
#endif /* GL_OES_vertex_array_object */
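/*
 * Usage sketch for GL_OES_vertex_array_object (illustrative): vertex
 * attribute and element-buffer bindings made after this call are
 * captured in the VAO and later restored with a single
 * glBindVertexArrayOES.
 */
#if 0
static GLuint make_vao(void)
{
    GLuint vao = 0;
    glGenVertexArraysOES(1, &vao);
    glBindVertexArrayOES(vao);
    return vao;
}
#endif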
#ifndef GL_OES_vertex_half_float
#define GL_OES_vertex_half_float 1
#endif /* GL_OES_vertex_half_float */
#ifndef GL_OES_vertex_type_10_10_10_2
#define GL_OES_vertex_type_10_10_10_2 1
#define GL_UNSIGNED_INT_10_10_10_2_OES 0x8DF6
#define GL_INT_10_10_10_2_OES 0x8DF7
#endif /* GL_OES_vertex_type_10_10_10_2 */
#ifndef GL_OES_viewport_array
#define GL_OES_viewport_array 1
#define GL_MAX_VIEWPORTS_OES 0x825B
#define GL_VIEWPORT_SUBPIXEL_BITS_OES 0x825C
#define GL_VIEWPORT_BOUNDS_RANGE_OES 0x825D
#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_OES 0x825F
typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVOESPROC) (GLuint first, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFOESPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVOESPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVOESPROC) (GLuint first, GLsizei count, const GLint *v);
typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDOESPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVOESPROC) (GLuint index, const GLint *v);
typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVOESPROC) (GLuint first, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFOESPROC) (GLuint index, GLfloat n, GLfloat f);
typedef void (GL_APIENTRYP PFNGLGETFLOATI_VOESPROC) (GLenum target, GLuint index, GLfloat *data);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glViewportArrayvOES (GLuint first, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glViewportIndexedfOES (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
GL_APICALL void GL_APIENTRY glViewportIndexedfvOES (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glScissorArrayvOES (GLuint first, GLsizei count, const GLint *v);
GL_APICALL void GL_APIENTRY glScissorIndexedOES (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glScissorIndexedvOES (GLuint index, const GLint *v);
GL_APICALL void GL_APIENTRY glDepthRangeArrayfvOES (GLuint first, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glDepthRangeIndexedfOES (GLuint index, GLfloat n, GLfloat f);
GL_APICALL void GL_APIENTRY glGetFloati_vOES (GLenum target, GLuint index, GLfloat *data);
#endif
#endif /* GL_OES_viewport_array */
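/*
 * Usage sketch for GL_OES_viewport_array (illustrative; assumes a
 * geometry stage selects the viewport index via gl_ViewportIndex):
 * split the framebuffer into left/right halves as viewports 0 and 1.
 */
#if 0
static void split_screen(GLfloat w, GLfloat h)
{
    const GLfloat vps[8] = {
        0.0f,     0.0f, w * 0.5f, h,   /* viewport 0: left half  */
        w * 0.5f, 0.0f, w * 0.5f, h,   /* viewport 1: right half */
    };
    glViewportArrayvOES(0, 2, vps);
}
#endif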
#ifndef GL_AMD_compressed_3DC_texture
#define GL_AMD_compressed_3DC_texture 1
#define GL_3DC_X_AMD 0x87F9
#define GL_3DC_XY_AMD 0x87FA
#endif /* GL_AMD_compressed_3DC_texture */
#ifndef GL_AMD_compressed_ATC_texture
#define GL_AMD_compressed_ATC_texture 1
#define GL_ATC_RGB_AMD 0x8C92
#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93
#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE
#endif /* GL_AMD_compressed_ATC_texture */
#ifndef GL_AMD_framebuffer_multisample_advanced
#define GL_AMD_framebuffer_multisample_advanced 1
#define GL_RENDERBUFFER_STORAGE_SAMPLES_AMD 0x91B2
#define GL_MAX_COLOR_FRAMEBUFFER_SAMPLES_AMD 0x91B3
#define GL_MAX_COLOR_FRAMEBUFFER_STORAGE_SAMPLES_AMD 0x91B4
#define GL_MAX_DEPTH_STENCIL_FRAMEBUFFER_SAMPLES_AMD 0x91B5
#define GL_NUM_SUPPORTED_MULTISAMPLE_MODES_AMD 0x91B6
#define GL_SUPPORTED_MULTISAMPLE_MODES_AMD 0x91B7
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEADVANCEDAMDPROC) (GLenum target, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEADVANCEDAMDPROC) (GLuint renderbuffer, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleAdvancedAMD (GLenum target, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glNamedRenderbufferStorageMultisampleAdvancedAMD (GLuint renderbuffer, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
#endif
#endif /* GL_AMD_framebuffer_multisample_advanced */
#ifndef GL_AMD_performance_monitor
#define GL_AMD_performance_monitor 1
#define GL_COUNTER_TYPE_AMD 0x8BC0
#define GL_COUNTER_RANGE_AMD 0x8BC1
#define GL_UNSIGNED_INT64_AMD 0x8BC2
#define GL_PERCENTAGE_AMD 0x8BC3
#define GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4
#define GL_PERFMON_RESULT_SIZE_AMD 0x8BC5
#define GL_PERFMON_RESULT_AMD 0x8BC6
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSAMDPROC) (GLint *numGroups, GLsizei groupsSize, GLuint *groups);
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSAMDPROC) (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters);
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSTRINGAMDPROC) (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString);
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC) (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString);
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERINFOAMDPROC) (GLuint group, GLuint counter, GLenum pname, void *data);
typedef void (GL_APIENTRYP PFNGLGENPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors);
typedef void (GL_APIENTRYP PFNGLDELETEPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors);
typedef void (GL_APIENTRYP PFNGLSELECTPERFMONITORCOUNTERSAMDPROC) (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList);
typedef void (GL_APIENTRYP PFNGLBEGINPERFMONITORAMDPROC) (GLuint monitor);
typedef void (GL_APIENTRYP PFNGLENDPERFMONITORAMDPROC) (GLuint monitor);
typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupsAMD (GLint *numGroups, GLsizei groupsSize, GLuint *groups);
GL_APICALL void GL_APIENTRY glGetPerfMonitorCountersAMD (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters);
GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupStringAMD (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString);
GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterStringAMD (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString);
GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterInfoAMD (GLuint group, GLuint counter, GLenum pname, void *data);
GL_APICALL void GL_APIENTRY glGenPerfMonitorsAMD (GLsizei n, GLuint *monitors);
GL_APICALL void GL_APIENTRY glDeletePerfMonitorsAMD (GLsizei n, GLuint *monitors);
GL_APICALL void GL_APIENTRY glSelectPerfMonitorCountersAMD (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList);
GL_APICALL void GL_APIENTRY glBeginPerfMonitorAMD (GLuint monitor);
GL_APICALL void GL_APIENTRY glEndPerfMonitorAMD (GLuint monitor);
GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterDataAMD (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten);
#endif
#endif /* GL_AMD_performance_monitor */
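/*
 * Usage sketch for GL_AMD_performance_monitor (illustrative): the group
 * query follows the usual two-call pattern; passing a NULL array (with
 * groupsSize ignored) is assumed here to return only the count.
 */
#if 0
static GLint count_counter_groups(void)
{
    GLint n = 0;
    glGetPerfMonitorGroupsAMD(&n, 0, NULL);
    return n;
}
#endif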
#ifndef GL_AMD_program_binary_Z400
#define GL_AMD_program_binary_Z400 1
#define GL_Z400_BINARY_AMD 0x8740
#endif /* GL_AMD_program_binary_Z400 */
#ifndef GL_ANDROID_extension_pack_es31a
#define GL_ANDROID_extension_pack_es31a 1
#endif /* GL_ANDROID_extension_pack_es31a */
#ifndef GL_ANGLE_depth_texture
#define GL_ANGLE_depth_texture 1
#endif /* GL_ANGLE_depth_texture */
#ifndef GL_ANGLE_framebuffer_blit
#define GL_ANGLE_framebuffer_blit 1
#define GL_READ_FRAMEBUFFER_ANGLE 0x8CA8
#define GL_DRAW_FRAMEBUFFER_ANGLE 0x8CA9
#define GL_DRAW_FRAMEBUFFER_BINDING_ANGLE 0x8CA6
#define GL_READ_FRAMEBUFFER_BINDING_ANGLE 0x8CAA
typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERANGLEPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBlitFramebufferANGLE (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#endif
#endif /* GL_ANGLE_framebuffer_blit */
#ifndef GL_ANGLE_framebuffer_multisample
#define GL_ANGLE_framebuffer_multisample 1
#define GL_RENDERBUFFER_SAMPLES_ANGLE 0x8CAB
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE 0x8D56
#define GL_MAX_SAMPLES_ANGLE 0x8D57
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleANGLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
#endif
#endif /* GL_ANGLE_framebuffer_multisample */
#ifndef GL_ANGLE_instanced_arrays
#define GL_ANGLE_instanced_arrays 1
#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE 0x88FE
typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDANGLEPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDANGLEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORANGLEPROC) (GLuint index, GLuint divisor);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor);
#endif
#endif /* GL_ANGLE_instanced_arrays */
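/*
 * Usage sketch for GL_ANGLE_instanced_arrays (illustrative; assumes
 * attribute 1 carries per-instance data such as an offset): a divisor
 * of 1 advances that attribute once per instance rather than per vertex.
 */
#if 0
static void draw_instanced(GLsizei verts, GLsizei instances)
{
    glVertexAttribDivisorANGLE(1, 1);
    glDrawArraysInstancedANGLE(GL_TRIANGLES, 0, verts, instances);
}
#endif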
#ifndef GL_ANGLE_pack_reverse_row_order
#define GL_ANGLE_pack_reverse_row_order 1
#define GL_PACK_REVERSE_ROW_ORDER_ANGLE 0x93A4
#endif /* GL_ANGLE_pack_reverse_row_order */
#ifndef GL_ANGLE_program_binary
#define GL_ANGLE_program_binary 1
#define GL_PROGRAM_BINARY_ANGLE 0x93A6
#endif /* GL_ANGLE_program_binary */
#ifndef GL_ANGLE_texture_compression_dxt3
#define GL_ANGLE_texture_compression_dxt3 1
#define GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE 0x83F2
#endif /* GL_ANGLE_texture_compression_dxt3 */
#ifndef GL_ANGLE_texture_compression_dxt5
#define GL_ANGLE_texture_compression_dxt5 1
#define GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE 0x83F3
#endif /* GL_ANGLE_texture_compression_dxt5 */
#ifndef GL_ANGLE_texture_usage
#define GL_ANGLE_texture_usage 1
#define GL_TEXTURE_USAGE_ANGLE 0x93A2
#define GL_FRAMEBUFFER_ATTACHMENT_ANGLE 0x93A3
#endif /* GL_ANGLE_texture_usage */
#ifndef GL_ANGLE_translated_shader_source
#define GL_ANGLE_translated_shader_source 1
#define GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE 0x93A0
typedef void (GL_APIENTRYP PFNGLGETTRANSLATEDSHADERSOURCEANGLEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
#endif
#endif /* GL_ANGLE_translated_shader_source */
#ifndef GL_APPLE_clip_distance
#define GL_APPLE_clip_distance 1
#define GL_MAX_CLIP_DISTANCES_APPLE 0x0D32
#define GL_CLIP_DISTANCE0_APPLE 0x3000
#define GL_CLIP_DISTANCE1_APPLE 0x3001
#define GL_CLIP_DISTANCE2_APPLE 0x3002
#define GL_CLIP_DISTANCE3_APPLE 0x3003
#define GL_CLIP_DISTANCE4_APPLE 0x3004
#define GL_CLIP_DISTANCE5_APPLE 0x3005
#define GL_CLIP_DISTANCE6_APPLE 0x3006
#define GL_CLIP_DISTANCE7_APPLE 0x3007
#endif /* GL_APPLE_clip_distance */
#ifndef GL_APPLE_color_buffer_packed_float
#define GL_APPLE_color_buffer_packed_float 1
#endif /* GL_APPLE_color_buffer_packed_float */
#ifndef GL_APPLE_copy_texture_levels
#define GL_APPLE_copy_texture_levels 1
typedef void (GL_APIENTRYP PFNGLCOPYTEXTURELEVELSAPPLEPROC) (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCopyTextureLevelsAPPLE (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount);
#endif
#endif /* GL_APPLE_copy_texture_levels */
#ifndef GL_APPLE_framebuffer_multisample
#define GL_APPLE_framebuffer_multisample 1
#define GL_RENDERBUFFER_SAMPLES_APPLE 0x8CAB
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE 0x8D56
#define GL_MAX_SAMPLES_APPLE 0x8D57
#define GL_READ_FRAMEBUFFER_APPLE 0x8CA8
#define GL_DRAW_FRAMEBUFFER_APPLE 0x8CA9
#define GL_DRAW_FRAMEBUFFER_BINDING_APPLE 0x8CA6
#define GL_READ_FRAMEBUFFER_BINDING_APPLE 0x8CAA
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleAPPLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glResolveMultisampleFramebufferAPPLE (void);
#endif
#endif /* GL_APPLE_framebuffer_multisample */
#ifndef GL_APPLE_rgb_422
#define GL_APPLE_rgb_422 1
#define GL_RGB_422_APPLE 0x8A1F
#define GL_UNSIGNED_SHORT_8_8_APPLE 0x85BA
#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB
#define GL_RGB_RAW_422_APPLE 0x8A51
#endif /* GL_APPLE_rgb_422 */
#ifndef GL_APPLE_sync
#define GL_APPLE_sync 1
#define GL_SYNC_OBJECT_APPLE 0x8A53
#define GL_MAX_SERVER_WAIT_TIMEOUT_APPLE 0x9111
#define GL_OBJECT_TYPE_APPLE 0x9112
#define GL_SYNC_CONDITION_APPLE 0x9113
#define GL_SYNC_STATUS_APPLE 0x9114
#define GL_SYNC_FLAGS_APPLE 0x9115
#define GL_SYNC_FENCE_APPLE 0x9116
#define GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE 0x9117
#define GL_UNSIGNALED_APPLE 0x9118
#define GL_SIGNALED_APPLE 0x9119
#define GL_ALREADY_SIGNALED_APPLE 0x911A
#define GL_TIMEOUT_EXPIRED_APPLE 0x911B
#define GL_CONDITION_SATISFIED_APPLE 0x911C
#define GL_WAIT_FAILED_APPLE 0x911D
#define GL_SYNC_FLUSH_COMMANDS_BIT_APPLE 0x00000001
#define GL_TIMEOUT_IGNORED_APPLE 0xFFFFFFFFFFFFFFFFull
typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCAPPLEPROC) (GLenum condition, GLbitfield flags);
typedef GLboolean (GL_APIENTRYP PFNGLISSYNCAPPLEPROC) (GLsync sync);
typedef void (GL_APIENTRYP PFNGLDELETESYNCAPPLEPROC) (GLsync sync);
typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
typedef void (GL_APIENTRYP PFNGLWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
typedef void (GL_APIENTRYP PFNGLGETINTEGER64VAPPLEPROC) (GLenum pname, GLint64 *params);
typedef void (GL_APIENTRYP PFNGLGETSYNCIVAPPLEPROC) (GLsync sync, GLenum pname, GLsizei count, GLsizei *length, GLint *values);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLsync GL_APIENTRY glFenceSyncAPPLE (GLenum condition, GLbitfield flags);
GL_APICALL GLboolean GL_APIENTRY glIsSyncAPPLE (GLsync sync);
GL_APICALL void GL_APIENTRY glDeleteSyncAPPLE (GLsync sync);
GL_APICALL GLenum GL_APIENTRY glClientWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout);
GL_APICALL void GL_APIENTRY glWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout);
GL_APICALL void GL_APIENTRY glGetInteger64vAPPLE (GLenum pname, GLint64 *params);
GL_APICALL void GL_APIENTRY glGetSyncivAPPLE (GLsync sync, GLenum pname, GLsizei count, GLsizei *length, GLint *values);
#endif
#endif /* GL_APPLE_sync */
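/*
 * Usage sketch for GL_APPLE_sync (illustrative): insert a fence after
 * queued GL work and block the client for at most timeout_ms
 * milliseconds until the GPU reaches it.
 */
#if 0
static int wait_gpu(GLuint64 timeout_ms)
{
    GLsync fence = glFenceSyncAPPLE(GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE, 0);
    GLenum r = glClientWaitSyncAPPLE(fence, GL_SYNC_FLUSH_COMMANDS_BIT_APPLE,
                                     timeout_ms * 1000000u);
    glDeleteSyncAPPLE(fence);
    return r == GL_ALREADY_SIGNALED_APPLE || r == GL_CONDITION_SATISFIED_APPLE;
}
#endif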
#ifndef GL_APPLE_texture_format_BGRA8888
#define GL_APPLE_texture_format_BGRA8888 1
#define GL_BGRA_EXT 0x80E1
#define GL_BGRA8_EXT 0x93A1
#endif /* GL_APPLE_texture_format_BGRA8888 */
#ifndef GL_APPLE_texture_max_level
#define GL_APPLE_texture_max_level 1
#define GL_TEXTURE_MAX_LEVEL_APPLE 0x813D
#endif /* GL_APPLE_texture_max_level */
#ifndef GL_APPLE_texture_packed_float
#define GL_APPLE_texture_packed_float 1
#define GL_UNSIGNED_INT_10F_11F_11F_REV_APPLE 0x8C3B
#define GL_UNSIGNED_INT_5_9_9_9_REV_APPLE 0x8C3E
#define GL_R11F_G11F_B10F_APPLE 0x8C3A
#define GL_RGB9_E5_APPLE 0x8C3D
#endif /* GL_APPLE_texture_packed_float */
#ifndef GL_ARM_mali_program_binary
#define GL_ARM_mali_program_binary 1
#define GL_MALI_PROGRAM_BINARY_ARM 0x8F61
#endif /* GL_ARM_mali_program_binary */
#ifndef GL_ARM_mali_shader_binary
#define GL_ARM_mali_shader_binary 1
#define GL_MALI_SHADER_BINARY_ARM 0x8F60
#endif /* GL_ARM_mali_shader_binary */
#ifndef GL_ARM_rgba8
#define GL_ARM_rgba8 1
#endif /* GL_ARM_rgba8 */
#ifndef GL_ARM_shader_framebuffer_fetch
#define GL_ARM_shader_framebuffer_fetch 1
#define GL_FETCH_PER_SAMPLE_ARM 0x8F65
#define GL_FRAGMENT_SHADER_FRAMEBUFFER_FETCH_MRT_ARM 0x8F66
#endif /* GL_ARM_shader_framebuffer_fetch */
#ifndef GL_ARM_shader_framebuffer_fetch_depth_stencil
#define GL_ARM_shader_framebuffer_fetch_depth_stencil 1
#endif /* GL_ARM_shader_framebuffer_fetch_depth_stencil */
#ifndef GL_ARM_texture_unnormalized_coordinates
#define GL_ARM_texture_unnormalized_coordinates 1
#define GL_TEXTURE_UNNORMALIZED_COORDINATES_ARM 0x8F6A
#endif /* GL_ARM_texture_unnormalized_coordinates */
#ifndef GL_DMP_program_binary
#define GL_DMP_program_binary 1
#define GL_SMAPHS30_PROGRAM_BINARY_DMP 0x9251
#define GL_SMAPHS_PROGRAM_BINARY_DMP 0x9252
#define GL_DMP_PROGRAM_BINARY_DMP 0x9253
#endif /* GL_DMP_program_binary */
#ifndef GL_DMP_shader_binary
#define GL_DMP_shader_binary 1
#define GL_SHADER_BINARY_DMP 0x9250
#endif /* GL_DMP_shader_binary */
#ifndef GL_EXT_EGL_image_array
#define GL_EXT_EGL_image_array 1
#endif /* GL_EXT_EGL_image_array */
#ifndef GL_EXT_EGL_image_storage
#define GL_EXT_EGL_image_storage 1
typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXSTORAGEEXTPROC) (GLenum target, GLeglImageOES image, const GLint* attrib_list);
typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURESTORAGEEXTPROC) (GLuint texture, GLeglImageOES image, const GLint* attrib_list);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glEGLImageTargetTexStorageEXT (GLenum target, GLeglImageOES image, const GLint* attrib_list);
GL_APICALL void GL_APIENTRY glEGLImageTargetTextureStorageEXT (GLuint texture, GLeglImageOES image, const GLint* attrib_list);
#endif
#endif /* GL_EXT_EGL_image_storage */
#ifndef GL_EXT_EGL_image_storage_compression
#define GL_EXT_EGL_image_storage_compression 1
#define GL_SURFACE_COMPRESSION_EXT 0x96C0
#define GL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT 0x96C1
#define GL_SURFACE_COMPRESSION_FIXED_RATE_DEFAULT_EXT 0x96C2
#endif /* GL_EXT_EGL_image_storage_compression */
#ifndef GL_EXT_YUV_target
#define GL_EXT_YUV_target 1
#define GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT 0x8BE7
#endif /* GL_EXT_YUV_target */
#ifndef GL_EXT_base_instance
#define GL_EXT_base_instance 1
typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawArraysInstancedBaseInstanceEXT (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
#endif
#endif /* GL_EXT_base_instance */
#ifndef GL_EXT_blend_func_extended
#define GL_EXT_blend_func_extended 1
#define GL_SRC1_COLOR_EXT 0x88F9
#define GL_SRC1_ALPHA_EXT 0x8589
#define GL_ONE_MINUS_SRC1_COLOR_EXT 0x88FA
#define GL_ONE_MINUS_SRC1_ALPHA_EXT 0x88FB
#define GL_SRC_ALPHA_SATURATE_EXT 0x0308
#define GL_LOCATION_INDEX_EXT 0x930F
#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT 0x88FC
typedef void (GL_APIENTRYP PFNGLBINDFRAGDATALOCATIONINDEXEDEXTPROC) (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name);
typedef void (GL_APIENTRYP PFNGLBINDFRAGDATALOCATIONEXTPROC) (GLuint program, GLuint color, const GLchar *name);
typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONINDEXEXTPROC) (GLuint program, GLenum programInterface, const GLchar *name);
typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATAINDEXEXTPROC) (GLuint program, const GLchar *name);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBindFragDataLocationIndexedEXT (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name);
GL_APICALL void GL_APIENTRY glBindFragDataLocationEXT (GLuint program, GLuint color, const GLchar *name);
GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocationIndexEXT (GLuint program, GLenum programInterface, const GLchar *name);
GL_APICALL GLint GL_APIENTRY glGetFragDataIndexEXT (GLuint program, const GLchar *name);
#endif
#endif /* GL_EXT_blend_func_extended */
#ifndef GL_EXT_blend_minmax
#define GL_EXT_blend_minmax 1
#define GL_MIN_EXT 0x8007
#define GL_MAX_EXT 0x8008
#endif /* GL_EXT_blend_minmax */
#ifndef GL_EXT_buffer_storage
#define GL_EXT_buffer_storage 1
#define GL_MAP_READ_BIT 0x0001
#define GL_MAP_WRITE_BIT 0x0002
#define GL_MAP_PERSISTENT_BIT_EXT 0x0040
#define GL_MAP_COHERENT_BIT_EXT 0x0080
#define GL_DYNAMIC_STORAGE_BIT_EXT 0x0100
#define GL_CLIENT_STORAGE_BIT_EXT 0x0200
#define GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT 0x00004000
#define GL_BUFFER_IMMUTABLE_STORAGE_EXT 0x821F
#define GL_BUFFER_STORAGE_FLAGS_EXT 0x8220
typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTPROC) (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBufferStorageEXT (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags);
#endif
#endif /* GL_EXT_buffer_storage */
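/*
 * Usage sketch for GL_EXT_buffer_storage (illustrative; glMapBufferRange
 * is an ES 3.0 core assumption): create an immutable store that can stay
 * mapped while the GPU reads it, a common streaming pattern.
 */
#if 0
static void *map_persistent(GLsizeiptr size)
{
    const GLbitfield flags = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT_EXT |
                             GL_MAP_COHERENT_BIT_EXT;
    glBufferStorageEXT(GL_ARRAY_BUFFER, size, NULL, flags);
    return glMapBufferRange(GL_ARRAY_BUFFER, 0, size, flags);
}
#endif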
#ifndef GL_EXT_clear_texture
#define GL_EXT_clear_texture 1
typedef void (GL_APIENTRYP PFNGLCLEARTEXIMAGEEXTPROC) (GLuint texture, GLint level, GLenum format, GLenum type, const void *data);
typedef void (GL_APIENTRYP PFNGLCLEARTEXSUBIMAGEEXTPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glClearTexImageEXT (GLuint texture, GLint level, GLenum format, GLenum type, const void *data);
GL_APICALL void GL_APIENTRY glClearTexSubImageEXT (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
#endif
#endif /* GL_EXT_clear_texture */
#ifndef GL_EXT_clip_control
#define GL_EXT_clip_control 1
#define GL_LOWER_LEFT_EXT 0x8CA1
#define GL_UPPER_LEFT_EXT 0x8CA2
#define GL_NEGATIVE_ONE_TO_ONE_EXT 0x935E
#define GL_ZERO_TO_ONE_EXT 0x935F
#define GL_CLIP_ORIGIN_EXT 0x935C
#define GL_CLIP_DEPTH_MODE_EXT 0x935D
typedef void (GL_APIENTRYP PFNGLCLIPCONTROLEXTPROC) (GLenum origin, GLenum depth);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glClipControlEXT (GLenum origin, GLenum depth);
#endif
#endif /* GL_EXT_clip_control */
#ifndef GL_EXT_clip_cull_distance
#define GL_EXT_clip_cull_distance 1
#define GL_MAX_CLIP_DISTANCES_EXT 0x0D32
#define GL_MAX_CULL_DISTANCES_EXT 0x82F9
#define GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES_EXT 0x82FA
#define GL_CLIP_DISTANCE0_EXT 0x3000
#define GL_CLIP_DISTANCE1_EXT 0x3001
#define GL_CLIP_DISTANCE2_EXT 0x3002
#define GL_CLIP_DISTANCE3_EXT 0x3003
#define GL_CLIP_DISTANCE4_EXT 0x3004
#define GL_CLIP_DISTANCE5_EXT 0x3005
#define GL_CLIP_DISTANCE6_EXT 0x3006
#define GL_CLIP_DISTANCE7_EXT 0x3007
#endif /* GL_EXT_clip_cull_distance */
#ifndef GL_EXT_color_buffer_float
#define GL_EXT_color_buffer_float 1
#endif /* GL_EXT_color_buffer_float */
#ifndef GL_EXT_color_buffer_half_float
#define GL_EXT_color_buffer_half_float 1
#define GL_RGBA16F_EXT 0x881A
#define GL_RGB16F_EXT 0x881B
#define GL_RG16F_EXT 0x822F
#define GL_R16F_EXT 0x822D
#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT 0x8211
#define GL_UNSIGNED_NORMALIZED_EXT 0x8C17
#endif /* GL_EXT_color_buffer_half_float */
#ifndef GL_EXT_conservative_depth
#define GL_EXT_conservative_depth 1
#endif /* GL_EXT_conservative_depth */
#ifndef GL_EXT_copy_image
#define GL_EXT_copy_image 1
typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAEXTPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCopyImageSubDataEXT (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
#endif
#endif /* GL_EXT_copy_image */
#ifndef GL_EXT_debug_label
#define GL_EXT_debug_label 1
#define GL_PROGRAM_PIPELINE_OBJECT_EXT 0x8A4F
#define GL_PROGRAM_OBJECT_EXT 0x8B40
#define GL_SHADER_OBJECT_EXT 0x8B48
#define GL_BUFFER_OBJECT_EXT 0x9151
#define GL_QUERY_OBJECT_EXT 0x9153
#define GL_VERTEX_ARRAY_OBJECT_EXT 0x9154
#define GL_TRANSFORM_FEEDBACK 0x8E22
typedef void (GL_APIENTRYP PFNGLLABELOBJECTEXTPROC) (GLenum type, GLuint object, GLsizei length, const GLchar *label);
typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELEXTPROC) (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glLabelObjectEXT (GLenum type, GLuint object, GLsizei length, const GLchar *label);
GL_APICALL void GL_APIENTRY glGetObjectLabelEXT (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label);
#endif
#endif /* GL_EXT_debug_label */
#ifndef GL_EXT_debug_marker
#define GL_EXT_debug_marker 1
typedef void (GL_APIENTRYP PFNGLINSERTEVENTMARKEREXTPROC) (GLsizei length, const GLchar *marker);
typedef void (GL_APIENTRYP PFNGLPUSHGROUPMARKEREXTPROC) (GLsizei length, const GLchar *marker);
typedef void (GL_APIENTRYP PFNGLPOPGROUPMARKEREXTPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glInsertEventMarkerEXT (GLsizei length, const GLchar *marker);
GL_APICALL void GL_APIENTRY glPushGroupMarkerEXT (GLsizei length, const GLchar *marker);
GL_APICALL void GL_APIENTRY glPopGroupMarkerEXT (void);
#endif
#endif /* GL_EXT_debug_marker */
#ifndef GL_EXT_depth_clamp
#define GL_EXT_depth_clamp 1
#define GL_DEPTH_CLAMP_EXT 0x864F
#endif /* GL_EXT_depth_clamp */
#ifndef GL_EXT_discard_framebuffer
#define GL_EXT_discard_framebuffer 1
#define GL_COLOR_EXT 0x1800
#define GL_DEPTH_EXT 0x1801
#define GL_STENCIL_EXT 0x1802
typedef void (GL_APIENTRYP PFNGLDISCARDFRAMEBUFFEREXTPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei numAttachments, const GLenum *attachments);
#endif
#endif /* GL_EXT_discard_framebuffer */
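/*
 * Usage sketch for GL_EXT_discard_framebuffer (illustrative; assumes an
 * application FBO is bound -- for the default framebuffer use
 * GL_DEPTH_EXT/GL_STENCIL_EXT instead): hints a tiled GPU that these
 * attachments need not be written back to memory after rendering.
 */
#if 0
static void discard_depth_stencil(void)
{
    static const GLenum att[2] = { GL_DEPTH_ATTACHMENT, GL_STENCIL_ATTACHMENT };
    glDiscardFramebufferEXT(GL_FRAMEBUFFER, 2, att);
}
#endif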
#ifndef GL_EXT_disjoint_timer_query
#define GL_EXT_disjoint_timer_query 1
#define GL_QUERY_COUNTER_BITS_EXT 0x8864
#define GL_CURRENT_QUERY_EXT 0x8865
#define GL_QUERY_RESULT_EXT 0x8866
#define GL_QUERY_RESULT_AVAILABLE_EXT 0x8867
#define GL_TIME_ELAPSED_EXT 0x88BF
#define GL_TIMESTAMP_EXT 0x8E28
#define GL_GPU_DISJOINT_EXT 0x8FBB
typedef void (GL_APIENTRYP PFNGLGENQUERIESEXTPROC) (GLsizei n, GLuint *ids);
typedef void (GL_APIENTRYP PFNGLDELETEQUERIESEXTPROC) (GLsizei n, const GLuint *ids);
typedef GLboolean (GL_APIENTRYP PFNGLISQUERYEXTPROC) (GLuint id);
typedef void (GL_APIENTRYP PFNGLBEGINQUERYEXTPROC) (GLenum target, GLuint id);
typedef void (GL_APIENTRYP PFNGLENDQUERYEXTPROC) (GLenum target);
typedef void (GL_APIENTRYP PFNGLQUERYCOUNTEREXTPROC) (GLuint id, GLenum target);
typedef void (GL_APIENTRYP PFNGLGETQUERYIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTIVEXTPROC) (GLuint id, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVEXTPROC) (GLuint id, GLenum pname, GLuint *params);
typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTI64VEXTPROC) (GLuint id, GLenum pname, GLint64 *params);
typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUI64VEXTPROC) (GLuint id, GLenum pname, GLuint64 *params);
typedef void (GL_APIENTRYP PFNGLGETINTEGER64VEXTPROC) (GLenum pname, GLint64 *data);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGenQueriesEXT (GLsizei n, GLuint *ids);
GL_APICALL void GL_APIENTRY glDeleteQueriesEXT (GLsizei n, const GLuint *ids);
GL_APICALL GLboolean GL_APIENTRY glIsQueryEXT (GLuint id);
GL_APICALL void GL_APIENTRY glBeginQueryEXT (GLenum target, GLuint id);
GL_APICALL void GL_APIENTRY glEndQueryEXT (GLenum target);
GL_APICALL void GL_APIENTRY glQueryCounterEXT (GLuint id, GLenum target);
GL_APICALL void GL_APIENTRY glGetQueryivEXT (GLenum target, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetQueryObjectivEXT (GLuint id, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLuint id, GLenum pname, GLuint *params);
GL_APICALL void GL_APIENTRY glGetQueryObjecti64vEXT (GLuint id, GLenum pname, GLint64 *params);
GL_APICALL void GL_APIENTRY glGetQueryObjectui64vEXT (GLuint id, GLenum pname, GLuint64 *params);
GL_APICALL void GL_APIENTRY glGetInteger64vEXT (GLenum pname, GLint64 *data);
#endif
#endif /* GL_EXT_disjoint_timer_query */
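/*
 * Usage sketch for GL_EXT_disjoint_timer_query (illustrative): time a
 * span of GL work on the GPU. Reading GL_QUERY_RESULT_EXT directly
 * blocks; production code should poll GL_QUERY_RESULT_AVAILABLE_EXT.
 */
#if 0
static GLuint64 time_span_ns(void)
{
    GLuint q = 0;
    GLuint64 ns = 0;
    GLint disjoint = 0;
    glGenQueriesEXT(1, &q);
    glBeginQueryEXT(GL_TIME_ELAPSED_EXT, q);
    /* ... GL commands to be timed ... */
    glEndQueryEXT(GL_TIME_ELAPSED_EXT);
    glGetQueryObjectui64vEXT(q, GL_QUERY_RESULT_EXT, &ns);
    glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint); /* result invalid if set */
    glDeleteQueriesEXT(1, &q);
    return disjoint ? 0 : ns;
}
#endif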
#ifndef GL_EXT_draw_buffers
#define GL_EXT_draw_buffers 1
#define GL_MAX_COLOR_ATTACHMENTS_EXT 0x8CDF
#define GL_MAX_DRAW_BUFFERS_EXT 0x8824
#define GL_DRAW_BUFFER0_EXT 0x8825
#define GL_DRAW_BUFFER1_EXT 0x8826
#define GL_DRAW_BUFFER2_EXT 0x8827
#define GL_DRAW_BUFFER3_EXT 0x8828
#define GL_DRAW_BUFFER4_EXT 0x8829
#define GL_DRAW_BUFFER5_EXT 0x882A
#define GL_DRAW_BUFFER6_EXT 0x882B
#define GL_DRAW_BUFFER7_EXT 0x882C
#define GL_DRAW_BUFFER8_EXT 0x882D
#define GL_DRAW_BUFFER9_EXT 0x882E
#define GL_DRAW_BUFFER10_EXT 0x882F
#define GL_DRAW_BUFFER11_EXT 0x8830
#define GL_DRAW_BUFFER12_EXT 0x8831
#define GL_DRAW_BUFFER13_EXT 0x8832
#define GL_DRAW_BUFFER14_EXT 0x8833
#define GL_DRAW_BUFFER15_EXT 0x8834
#define GL_COLOR_ATTACHMENT0_EXT 0x8CE0
#define GL_COLOR_ATTACHMENT1_EXT 0x8CE1
#define GL_COLOR_ATTACHMENT2_EXT 0x8CE2
#define GL_COLOR_ATTACHMENT3_EXT 0x8CE3
#define GL_COLOR_ATTACHMENT4_EXT 0x8CE4
#define GL_COLOR_ATTACHMENT5_EXT 0x8CE5
#define GL_COLOR_ATTACHMENT6_EXT 0x8CE6
#define GL_COLOR_ATTACHMENT7_EXT 0x8CE7
#define GL_COLOR_ATTACHMENT8_EXT 0x8CE8
#define GL_COLOR_ATTACHMENT9_EXT 0x8CE9
#define GL_COLOR_ATTACHMENT10_EXT 0x8CEA
#define GL_COLOR_ATTACHMENT11_EXT 0x8CEB
#define GL_COLOR_ATTACHMENT12_EXT 0x8CEC
#define GL_COLOR_ATTACHMENT13_EXT 0x8CED
#define GL_COLOR_ATTACHMENT14_EXT 0x8CEE
#define GL_COLOR_ATTACHMENT15_EXT 0x8CEF
typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSEXTPROC) (GLsizei n, const GLenum *bufs);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei n, const GLenum *bufs);
#endif
#endif /* GL_EXT_draw_buffers */
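/*
 * Usage sketch for GL_EXT_draw_buffers (illustrative; assumes an FBO
 * with textures attached at attachments 0 and 1): route fragment shader
 * outputs 0 and 1 to the two color attachments.
 */
#if 0
static void enable_two_render_targets(void)
{
    static const GLenum bufs[2] = { GL_COLOR_ATTACHMENT0_EXT,
                                    GL_COLOR_ATTACHMENT1_EXT };
    glDrawBuffersEXT(2, bufs);
}
#endif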
#ifndef GL_EXT_draw_buffers_indexed
#define GL_EXT_draw_buffers_indexed 1
typedef void (GL_APIENTRYP PFNGLENABLEIEXTPROC) (GLenum target, GLuint index);
typedef void (GL_APIENTRYP PFNGLDISABLEIEXTPROC) (GLenum target, GLuint index);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIEXTPROC) (GLuint buf, GLenum mode);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIEXTPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCIEXTPROC) (GLuint buf, GLenum src, GLenum dst);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIEXTPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
typedef void (GL_APIENTRYP PFNGLCOLORMASKIEXTPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIEXTPROC) (GLenum target, GLuint index);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glEnableiEXT (GLenum target, GLuint index);
GL_APICALL void GL_APIENTRY glDisableiEXT (GLenum target, GLuint index);
GL_APICALL void GL_APIENTRY glBlendEquationiEXT (GLuint buf, GLenum mode);
GL_APICALL void GL_APIENTRY glBlendEquationSeparateiEXT (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
GL_APICALL void GL_APIENTRY glBlendFunciEXT (GLuint buf, GLenum src, GLenum dst);
GL_APICALL void GL_APIENTRY glBlendFuncSeparateiEXT (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
GL_APICALL void GL_APIENTRY glColorMaskiEXT (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
GL_APICALL GLboolean GL_APIENTRY glIsEnablediEXT (GLenum target, GLuint index);
#endif
#endif /* GL_EXT_draw_buffers_indexed */
#ifndef GL_EXT_draw_elements_base_vertex
#define GL_EXT_draw_elements_base_vertex 1
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexEXT (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
#endif
#endif /* GL_EXT_draw_elements_base_vertex */
#ifndef GL_EXT_draw_instanced
#define GL_EXT_draw_instanced 1
typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDEXTPROC) (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawArraysInstancedEXT (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
#endif
#endif /* GL_EXT_draw_instanced */
#ifndef GL_EXT_draw_transform_feedback
#define GL_EXT_draw_transform_feedback 1
typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKEXTPROC) (GLenum mode, GLuint id);
typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDEXTPROC) (GLenum mode, GLuint id, GLsizei instancecount);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawTransformFeedbackEXT (GLenum mode, GLuint id);
GL_APICALL void GL_APIENTRY glDrawTransformFeedbackInstancedEXT (GLenum mode, GLuint id, GLsizei instancecount);
#endif
#endif /* GL_EXT_draw_transform_feedback */
#ifndef GL_EXT_external_buffer
#define GL_EXT_external_buffer 1
typedef void *GLeglClientBufferEXT;
typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTERNALEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEEXTERNALEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBufferStorageExternalEXT (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
GL_APICALL void GL_APIENTRY glNamedBufferStorageExternalEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
#endif
#endif /* GL_EXT_external_buffer */
#ifndef GL_EXT_float_blend
#define GL_EXT_float_blend 1
#endif /* GL_EXT_float_blend */
#ifndef GL_EXT_fragment_shading_rate
#define GL_EXT_fragment_shading_rate 1
#define GL_SHADING_RATE_1X1_PIXELS_EXT 0x96A6
#define GL_SHADING_RATE_1X2_PIXELS_EXT 0x96A7
#define GL_SHADING_RATE_2X1_PIXELS_EXT 0x96A8
#define GL_SHADING_RATE_2X2_PIXELS_EXT 0x96A9
#define GL_SHADING_RATE_1X4_PIXELS_EXT 0x96AA
#define GL_SHADING_RATE_4X1_PIXELS_EXT 0x96AB
#define GL_SHADING_RATE_4X2_PIXELS_EXT 0x96AC
#define GL_SHADING_RATE_2X4_PIXELS_EXT 0x96AD
#define GL_SHADING_RATE_4X4_PIXELS_EXT 0x96AE
#define GL_SHADING_RATE_EXT 0x96D0
#define GL_SHADING_RATE_ATTACHMENT_EXT 0x96D1
#define GL_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_EXT 0x96D2
#define GL_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_EXT 0x96D3
#define GL_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_EXT 0x96D4
#define GL_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_EXT 0x96D5
#define GL_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_EXT 0x96D6
#define GL_MIN_FRAGMENT_SHADING_RATE_ATTACHMENT_TEXEL_WIDTH_EXT 0x96D7
#define GL_MAX_FRAGMENT_SHADING_RATE_ATTACHMENT_TEXEL_WIDTH_EXT 0x96D8
#define GL_MIN_FRAGMENT_SHADING_RATE_ATTACHMENT_TEXEL_HEIGHT_EXT 0x96D9
#define GL_MAX_FRAGMENT_SHADING_RATE_ATTACHMENT_TEXEL_HEIGHT_EXT 0x96DA
#define GL_MAX_FRAGMENT_SHADING_RATE_ATTACHMENT_TEXEL_ASPECT_RATIO_EXT 0x96DB
#define GL_MAX_FRAGMENT_SHADING_RATE_ATTACHMENT_LAYERS_EXT 0x96DC
#define GL_FRAGMENT_SHADING_RATE_WITH_SHADER_DEPTH_STENCIL_WRITES_SUPPORTED_EXT 0x96DD
#define GL_FRAGMENT_SHADING_RATE_WITH_SAMPLE_MASK_SUPPORTED_EXT 0x96DE
#define GL_FRAGMENT_SHADING_RATE_ATTACHMENT_WITH_DEFAULT_FRAMEBUFFER_SUPPORTED_EXT 0x96DF
#define GL_FRAGMENT_SHADING_RATE_NON_TRIVIAL_COMBINERS_SUPPORTED_EXT 0x8F6F
typedef void (GL_APIENTRYP PFNGLGETFRAGMENTSHADINGRATESEXTPROC) (GLsizei samples, GLsizei maxCount, GLsizei *count, GLenum *shadingRates);
typedef void (GL_APIENTRYP PFNGLSHADINGRATEEXTPROC) (GLenum rate);
typedef void (GL_APIENTRYP PFNGLSHADINGRATECOMBINEROPSEXTPROC) (GLenum combinerOp0, GLenum combinerOp1);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERSHADINGRATEEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint baseLayer, GLsizei numLayers, GLsizei texelWidth, GLsizei texelHeight);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetFragmentShadingRatesEXT (GLsizei samples, GLsizei maxCount, GLsizei *count, GLenum *shadingRates);
GL_APICALL void GL_APIENTRY glShadingRateEXT (GLenum rate);
GL_APICALL void GL_APIENTRY glShadingRateCombinerOpsEXT (GLenum combinerOp0, GLenum combinerOp1);
GL_APICALL void GL_APIENTRY glFramebufferShadingRateEXT (GLenum target, GLenum attachment, GLuint texture, GLint baseLayer, GLsizei numLayers, GLsizei texelWidth, GLsizei texelHeight);
#endif
#endif /* GL_EXT_fragment_shading_rate */
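/*
 * Illustrative sketch (not part of the Khronos header): coarse per-draw
 * shading with GL_EXT_fragment_shading_rate, e.g. for low-frequency
 * content such as sky or motion-blurred regions.
 */
#ifdef GL_GLEXT_PROTOTYPES
static void example_coarse_shading(void)
{
    /* Shade one fragment per 2x2 pixel block; KEEP selects the first
       combiner input, so the per-draw rate set here wins over the
       primitive and attachment rates. */
    glShadingRateEXT(GL_SHADING_RATE_2X2_PIXELS_EXT);
    glShadingRateCombinerOpsEXT(GL_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_EXT,
                                GL_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_EXT);
}
#endif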
#ifndef GL_EXT_geometry_point_size
#define GL_EXT_geometry_point_size 1
#endif /* GL_EXT_geometry_point_size */
#ifndef GL_EXT_geometry_shader
#define GL_EXT_geometry_shader 1
#define GL_GEOMETRY_SHADER_EXT 0x8DD9
#define GL_GEOMETRY_SHADER_BIT_EXT 0x00000004
#define GL_GEOMETRY_LINKED_VERTICES_OUT_EXT 0x8916
#define GL_GEOMETRY_LINKED_INPUT_TYPE_EXT 0x8917
#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_EXT 0x8918
#define GL_GEOMETRY_SHADER_INVOCATIONS_EXT 0x887F
#define GL_LAYER_PROVOKING_VERTEX_EXT 0x825E
#define GL_LINES_ADJACENCY_EXT 0x000A
#define GL_LINE_STRIP_ADJACENCY_EXT 0x000B
#define GL_TRIANGLES_ADJACENCY_EXT 0x000C
#define GL_TRIANGLE_STRIP_ADJACENCY_EXT 0x000D
#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8DDF
#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_EXT 0x8A2C
#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8A32
#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_EXT 0x9123
#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_EXT 0x9124
#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT 0x8DE0
#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT 0x8DE1
#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_EXT 0x8E5A
#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT 0x8C29
#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_EXT 0x92CF
#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_EXT 0x92D5
#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_EXT 0x90CD
#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_EXT 0x90D7
#define GL_FIRST_VERTEX_CONVENTION_EXT 0x8E4D
#define GL_LAST_VERTEX_CONVENTION_EXT 0x8E4E
#define GL_UNDEFINED_VERTEX_EXT 0x8260
#define GL_PRIMITIVES_GENERATED_EXT 0x8C87
#define GL_FRAMEBUFFER_DEFAULT_LAYERS_EXT 0x9312
#define GL_MAX_FRAMEBUFFER_LAYERS_EXT 0x9317
#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT 0x8DA8
#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT 0x8DA7
#define GL_REFERENCED_BY_GEOMETRY_SHADER_EXT 0x9309
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferTextureEXT (GLenum target, GLenum attachment, GLuint texture, GLint level);
#endif
#endif /* GL_EXT_geometry_shader */
#ifndef GL_EXT_gpu_shader5
#define GL_EXT_gpu_shader5 1
#endif /* GL_EXT_gpu_shader5 */
#ifndef GL_EXT_instanced_arrays
#define GL_EXT_instanced_arrays 1
#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_EXT 0x88FE
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISOREXTPROC) (GLuint index, GLuint divisor);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glVertexAttribDivisorEXT (GLuint index, GLuint divisor);
#endif
#endif /* GL_EXT_instanced_arrays */
#ifndef GL_EXT_map_buffer_range
#define GL_EXT_map_buffer_range 1
#define GL_MAP_READ_BIT_EXT 0x0001
#define GL_MAP_WRITE_BIT_EXT 0x0002
#define GL_MAP_INVALIDATE_RANGE_BIT_EXT 0x0004
#define GL_MAP_INVALIDATE_BUFFER_BIT_EXT 0x0008
#define GL_MAP_FLUSH_EXPLICIT_BIT_EXT 0x0010
#define GL_MAP_UNSYNCHRONIZED_BIT_EXT 0x0020
typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void *GL_APIENTRY glMapBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
GL_APICALL void GL_APIENTRY glFlushMappedBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length);
#endif
#endif /* GL_EXT_map_buffer_range */
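/*
 * Illustrative sketch (not part of the Khronos header): streaming vertex
 * updates with GL_EXT_map_buffer_range. Assumes `vbo` was allocated with
 * glBufferData and that GL_OES_mapbuffer (declared earlier in this header)
 * supplies glUnmapBufferOES.
 */
#ifdef GL_GLEXT_PROTOTYPES
#include <string.h>
static void example_stream_vertices(GLuint vbo, GLsizeiptr size, const void *src)
{
    void *dst;
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    /* Invalidate the range so the driver need not preserve old contents,
       and flush explicitly so only the bytes we wrote are synchronized. */
    dst = glMapBufferRangeEXT(GL_ARRAY_BUFFER, 0, size,
                              GL_MAP_WRITE_BIT_EXT |
                              GL_MAP_INVALIDATE_RANGE_BIT_EXT |
                              GL_MAP_FLUSH_EXPLICIT_BIT_EXT);
    if (dst) {
        memcpy(dst, src, (size_t)size);
        glFlushMappedBufferRangeEXT(GL_ARRAY_BUFFER, 0, size);
        glUnmapBufferOES(GL_ARRAY_BUFFER);
    }
}
#endif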
#ifndef GL_EXT_memory_object
#define GL_EXT_memory_object 1
#define GL_TEXTURE_TILING_EXT 0x9580
#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581
#define GL_PROTECTED_MEMORY_OBJECT_EXT 0x959B
#define GL_NUM_TILING_TYPES_EXT 0x9582
#define GL_TILING_TYPES_EXT 0x9583
#define GL_OPTIMAL_TILING_EXT 0x9584
#define GL_LINEAR_TILING_EXT 0x9585
#define GL_NUM_DEVICE_UUIDS_EXT 0x9596
#define GL_DEVICE_UUID_EXT 0x9597
#define GL_DRIVER_UUID_EXT 0x9598
#define GL_UUID_SIZE_EXT 16
typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEVEXTPROC) (GLenum pname, GLubyte *data);
typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEI_VEXTPROC) (GLenum target, GLuint index, GLubyte *data);
typedef void (GL_APIENTRYP PFNGLDELETEMEMORYOBJECTSEXTPROC) (GLsizei n, const GLuint *memoryObjects);
typedef GLboolean (GL_APIENTRYP PFNGLISMEMORYOBJECTEXTPROC) (GLuint memoryObject);
typedef void (GL_APIENTRYP PFNGLCREATEMEMORYOBJECTSEXTPROC) (GLsizei n, GLuint *memoryObjects);
typedef void (GL_APIENTRYP PFNGLMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, const GLint *params);
typedef void (GL_APIENTRYP PFNGLGETMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEMEMEXTPROC) (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEMEMEXTPROC) (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetUnsignedBytevEXT (GLenum pname, GLubyte *data);
GL_APICALL void GL_APIENTRY glGetUnsignedBytei_vEXT (GLenum target, GLuint index, GLubyte *data);
GL_APICALL void GL_APIENTRY glDeleteMemoryObjectsEXT (GLsizei n, const GLuint *memoryObjects);
GL_APICALL GLboolean GL_APIENTRY glIsMemoryObjectEXT (GLuint memoryObject);
GL_APICALL void GL_APIENTRY glCreateMemoryObjectsEXT (GLsizei n, GLuint *memoryObjects);
GL_APICALL void GL_APIENTRY glMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, const GLint *params);
GL_APICALL void GL_APIENTRY glGetMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glTexStorageMem2DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTexStorageMem2DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTexStorageMem3DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTexStorageMem3DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glBufferStorageMemEXT (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTextureStorageMem2DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTextureStorageMem2DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTextureStorageMem3DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTextureStorageMem3DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glNamedBufferStorageMemEXT (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset);
#endif
#endif /* GL_EXT_memory_object */
#ifndef GL_EXT_memory_object_fd
#define GL_EXT_memory_object_fd 1
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYFDEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, GLint fd);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glImportMemoryFdEXT (GLuint memory, GLuint64 size, GLenum handleType, GLint fd);
#endif
#endif /* GL_EXT_memory_object_fd */
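/*
 * Illustrative sketch (not part of the Khronos header): wrapping memory
 * exported by another API (e.g. a Vulkan opaque fd) with GL_EXT_memory_object
 * and GL_EXT_memory_object_fd. GL_RGBA8_OES comes from GL_OES_rgb8_rgba8,
 * declared earlier in this header; fd ownership passes to GL on import.
 */
#ifdef GL_GLEXT_PROTOTYPES
static GLuint example_import_external_texture(GLint fd, GLuint64 size,
                                              GLsizei width, GLsizei height)
{
    GLuint mem = 0, tex = 0;
    glCreateMemoryObjectsEXT(1, &mem);
    glImportMemoryFdEXT(mem, size, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);
    /* Carve an immutable 2D texture out of the imported allocation. */
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexStorageMem2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, width, height, mem, 0);
    return tex;
}
#endif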
#ifndef GL_EXT_memory_object_win32
#define GL_EXT_memory_object_win32 1
#define GL_HANDLE_TYPE_OPAQUE_WIN32_EXT 0x9587
#define GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT 0x9588
#define GL_DEVICE_LUID_EXT 0x9599
#define GL_DEVICE_NODE_MASK_EXT 0x959A
#define GL_LUID_SIZE_EXT 8
#define GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT 0x9589
#define GL_HANDLE_TYPE_D3D12_RESOURCE_EXT 0x958A
#define GL_HANDLE_TYPE_D3D11_IMAGE_EXT 0x958B
#define GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT 0x958C
typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32HANDLEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, void *handle);
typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32NAMEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, const void *name);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glImportMemoryWin32HandleEXT (GLuint memory, GLuint64 size, GLenum handleType, void *handle);
GL_APICALL void GL_APIENTRY glImportMemoryWin32NameEXT (GLuint memory, GLuint64 size, GLenum handleType, const void *name);
#endif
#endif /* GL_EXT_memory_object_win32 */
#ifndef GL_EXT_multi_draw_arrays
#define GL_EXT_multi_draw_arrays 1
typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount);
#endif
#endif /* GL_EXT_multi_draw_arrays */
#ifndef GL_EXT_multi_draw_indirect
#define GL_EXT_multi_draw_indirect 1
typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTEXTPROC) (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride);
typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTEXTPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glMultiDrawArraysIndirectEXT (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride);
GL_APICALL void GL_APIENTRY glMultiDrawElementsIndirectEXT (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride);
#endif
#endif /* GL_EXT_multi_draw_indirect */
#ifndef GL_EXT_multisampled_compatibility
#define GL_EXT_multisampled_compatibility 1
#define GL_MULTISAMPLE_EXT 0x809D
#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F
#endif /* GL_EXT_multisampled_compatibility */
#ifndef GL_EXT_multisampled_render_to_texture
#define GL_EXT_multisampled_render_to_texture 1
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C
#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56
#define GL_MAX_SAMPLES_EXT 0x8D57
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
#endif
#endif /* GL_EXT_multisampled_render_to_texture */
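/*
 * Illustrative sketch (not part of the Khronos header): bandwidth-friendly
 * antialiasing with GL_EXT_multisampled_render_to_texture. The sample count
 * of 4 is a placeholder and must not exceed GL_MAX_SAMPLES_EXT.
 */
#ifdef GL_GLEXT_PROTOTYPES
static void example_msaa_render_to_texture(GLuint fbo, GLuint color_tex)
{
    /* Attach a single-sample texture; rasterization runs at 4x MSAA and the
       implicit resolve happens when the render pass ends, avoiding a
       separate multisample renderbuffer and an explicit blit. */
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2DMultisampleEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                         GL_TEXTURE_2D, color_tex, 0, 4);
}
#endif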
#ifndef GL_EXT_multisampled_render_to_texture2
#define GL_EXT_multisampled_render_to_texture2 1
#endif /* GL_EXT_multisampled_render_to_texture2 */
#ifndef GL_EXT_multiview_draw_buffers
#define GL_EXT_multiview_draw_buffers 1
#define GL_COLOR_ATTACHMENT_EXT 0x90F0
#define GL_MULTIVIEW_EXT 0x90F1
#define GL_DRAW_BUFFER_EXT 0x0C01
#define GL_READ_BUFFER_EXT 0x0C02
#define GL_MAX_MULTIVIEW_BUFFERS_EXT 0x90F2
typedef void (GL_APIENTRYP PFNGLREADBUFFERINDEXEDEXTPROC) (GLenum src, GLint index);
typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSINDEXEDEXTPROC) (GLint n, const GLenum *location, const GLint *indices);
typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VEXTPROC) (GLenum target, GLuint index, GLint *data);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glReadBufferIndexedEXT (GLenum src, GLint index);
GL_APICALL void GL_APIENTRY glDrawBuffersIndexedEXT (GLint n, const GLenum *location, const GLint *indices);
GL_APICALL void GL_APIENTRY glGetIntegeri_vEXT (GLenum target, GLuint index, GLint *data);
#endif
#endif /* GL_EXT_multiview_draw_buffers */
#ifndef GL_EXT_multiview_tessellation_geometry_shader
#define GL_EXT_multiview_tessellation_geometry_shader 1
#endif /* GL_EXT_multiview_tessellation_geometry_shader */
#ifndef GL_EXT_multiview_texture_multisample
#define GL_EXT_multiview_texture_multisample 1
#endif /* GL_EXT_multiview_texture_multisample */
#ifndef GL_EXT_multiview_timer_query
#define GL_EXT_multiview_timer_query 1
#endif /* GL_EXT_multiview_timer_query */
#ifndef GL_EXT_occlusion_query_boolean
#define GL_EXT_occlusion_query_boolean 1
#define GL_ANY_SAMPLES_PASSED_EXT 0x8C2F
#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT 0x8D6A
#endif /* GL_EXT_occlusion_query_boolean */
#ifndef GL_EXT_polygon_offset_clamp
#define GL_EXT_polygon_offset_clamp 1
#define GL_POLYGON_OFFSET_CLAMP_EXT 0x8E1B
typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETCLAMPEXTPROC) (GLfloat factor, GLfloat units, GLfloat clamp);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPolygonOffsetClampEXT (GLfloat factor, GLfloat units, GLfloat clamp);
#endif
#endif /* GL_EXT_polygon_offset_clamp */
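/*
 * Illustrative sketch (not part of the Khronos header): shadow-map depth
 * bias with GL_EXT_polygon_offset_clamp. The factor/units/clamp values are
 * hypothetical tuning constants.
 */
#ifdef GL_GLEXT_PROTOTYPES
static void example_shadow_pass_bias(void)
{
    /* Slope-scaled bias, clamped so near-edge-on triangles are not pushed
       arbitrarily far from the light. */
    glEnable(GL_POLYGON_OFFSET_FILL);
    glPolygonOffsetClampEXT(2.0f, 4.0f, 0.01f);
}
#endif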
#ifndef GL_EXT_post_depth_coverage
#define GL_EXT_post_depth_coverage 1
#endif /* GL_EXT_post_depth_coverage */
#ifndef GL_EXT_primitive_bounding_box
#define GL_EXT_primitive_bounding_box 1
#define GL_PRIMITIVE_BOUNDING_BOX_EXT 0x92BE
typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXEXTPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxEXT (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
#endif
#endif /* GL_EXT_primitive_bounding_box */
#ifndef GL_EXT_protected_textures
#define GL_EXT_protected_textures 1
#define GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT 0x00000010
#define GL_TEXTURE_PROTECTED_EXT 0x8BFA
#endif /* GL_EXT_protected_textures */
#ifndef GL_EXT_pvrtc_sRGB
#define GL_EXT_pvrtc_sRGB 1
#define GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT 0x8A54
#define GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT 0x8A55
#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT 0x8A56
#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT 0x8A57
#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG 0x93F0
#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG 0x93F1
#endif /* GL_EXT_pvrtc_sRGB */
#ifndef GL_EXT_raster_multisample
#define GL_EXT_raster_multisample 1
#define GL_RASTER_MULTISAMPLE_EXT 0x9327
#define GL_RASTER_SAMPLES_EXT 0x9328
#define GL_MAX_RASTER_SAMPLES_EXT 0x9329
#define GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT 0x932A
#define GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT 0x932B
#define GL_EFFECTIVE_RASTER_SAMPLES_EXT 0x932C
typedef void (GL_APIENTRYP PFNGLRASTERSAMPLESEXTPROC) (GLuint samples, GLboolean fixedsamplelocations);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRasterSamplesEXT (GLuint samples, GLboolean fixedsamplelocations);
#endif
#endif /* GL_EXT_raster_multisample */
#ifndef GL_EXT_read_format_bgra
#define GL_EXT_read_format_bgra 1
#define GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT 0x8365
#define GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT 0x8366
#endif /* GL_EXT_read_format_bgra */
#ifndef GL_EXT_render_snorm
#define GL_EXT_render_snorm 1
#define GL_R8_SNORM 0x8F94
#define GL_RG8_SNORM 0x8F95
#define GL_RGBA8_SNORM 0x8F97
#define GL_R16_SNORM_EXT 0x8F98
#define GL_RG16_SNORM_EXT 0x8F99
#define GL_RGBA16_SNORM_EXT 0x8F9B
#endif /* GL_EXT_render_snorm */
#ifndef GL_EXT_robustness
#define GL_EXT_robustness 1
#define GL_GUILTY_CONTEXT_RESET_EXT 0x8253
#define GL_INNOCENT_CONTEXT_RESET_EXT 0x8254
#define GL_UNKNOWN_CONTEXT_RESET_EXT 0x8255
#define GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3
#define GL_RESET_NOTIFICATION_STRATEGY_EXT 0x8256
#define GL_LOSE_CONTEXT_ON_RESET_EXT 0x8252
#define GL_NO_RESET_NOTIFICATION_EXT 0x8261
typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSEXTPROC) (void);
typedef void (GL_APIENTRYP PFNGLREADNPIXELSEXTPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusEXT (void);
GL_APICALL void GL_APIENTRY glReadnPixelsEXT (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
GL_APICALL void GL_APIENTRY glGetnUniformfvEXT (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
GL_APICALL void GL_APIENTRY glGetnUniformivEXT (GLuint program, GLint location, GLsizei bufSize, GLint *params);
#endif
#endif /* GL_EXT_robustness */
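/*
 * Illustrative sketch (not part of the Khronos header): reset detection and
 * bounded readback with GL_EXT_robustness. Assumes the context was created
 * with a robust reset-notification strategy.
 */
#ifdef GL_GLEXT_PROTOTYPES
static int example_context_is_healthy(void)
{
    /* GL_NO_ERROR: fine. GL_GUILTY/INNOCENT/UNKNOWN_CONTEXT_RESET_EXT: the
       context is lost and must be torn down and recreated. */
    return glGetGraphicsResetStatusEXT() == GL_NO_ERROR;
}
static void example_safe_readback(GLsizei w, GLsizei h, GLsizei buf_size,
                                  void *pixels)
{
    /* glReadnPixelsEXT writes at most buf_size bytes, so an undersized
       destination yields an error instead of memory corruption. */
    glReadnPixelsEXT(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, buf_size, pixels);
}
#endif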
#ifndef GL_EXT_sRGB
#define GL_EXT_sRGB 1
#define GL_SRGB_EXT 0x8C40
#define GL_SRGB_ALPHA_EXT 0x8C42
#define GL_SRGB8_ALPHA8_EXT 0x8C43
#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210
#endif /* GL_EXT_sRGB */
#ifndef GL_EXT_sRGB_write_control
#define GL_EXT_sRGB_write_control 1
#define GL_FRAMEBUFFER_SRGB_EXT 0x8DB9
#endif /* GL_EXT_sRGB_write_control */
#ifndef GL_EXT_semaphore
#define GL_EXT_semaphore 1
#define GL_LAYOUT_GENERAL_EXT 0x958D
#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
#define GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT 0x958F
#define GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT 0x9590
#define GL_LAYOUT_SHADER_READ_ONLY_EXT 0x9591
#define GL_LAYOUT_TRANSFER_SRC_EXT 0x9592
#define GL_LAYOUT_TRANSFER_DST_EXT 0x9593
#define GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT 0x9530
#define GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT 0x9531
typedef void (GL_APIENTRYP PFNGLGENSEMAPHORESEXTPROC) (GLsizei n, GLuint *semaphores);
typedef void (GL_APIENTRYP PFNGLDELETESEMAPHORESEXTPROC) (GLsizei n, const GLuint *semaphores);
typedef GLboolean (GL_APIENTRYP PFNGLISSEMAPHOREEXTPROC) (GLuint semaphore);
typedef void (GL_APIENTRYP PFNGLSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, const GLuint64 *params);
typedef void (GL_APIENTRYP PFNGLGETSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, GLuint64 *params);
typedef void (GL_APIENTRYP PFNGLWAITSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
typedef void (GL_APIENTRYP PFNGLSIGNALSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGenSemaphoresEXT (GLsizei n, GLuint *semaphores);
GL_APICALL void GL_APIENTRY glDeleteSemaphoresEXT (GLsizei n, const GLuint *semaphores);
GL_APICALL GLboolean GL_APIENTRY glIsSemaphoreEXT (GLuint semaphore);
GL_APICALL void GL_APIENTRY glSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, const GLuint64 *params);
GL_APICALL void GL_APIENTRY glGetSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, GLuint64 *params);
GL_APICALL void GL_APIENTRY glWaitSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
GL_APICALL void GL_APIENTRY glSignalSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
#endif
#endif /* GL_EXT_semaphore */
#ifndef GL_EXT_semaphore_fd
#define GL_EXT_semaphore_fd 1
typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREFDEXTPROC) (GLuint semaphore, GLenum handleType, GLint fd);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glImportSemaphoreFdEXT (GLuint semaphore, GLenum handleType, GLint fd);
#endif
#endif /* GL_EXT_semaphore_fd */
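/*
 * Illustrative sketch (not part of the Khronos header): a Vulkan-GL
 * synchronization round trip with GL_EXT_semaphore plus GL_EXT_semaphore_fd.
 * The layouts below are hypothetical and must mirror what the exporting API
 * left the image in / expects it back in.
 */
#ifdef GL_GLEXT_PROTOTYPES
#include <stddef.h>
static void example_semaphore_round_trip(GLint fd, GLuint shared_tex)
{
    GLuint sem = 0;
    GLenum acquire_layout = GL_LAYOUT_SHADER_READ_ONLY_EXT;
    GLenum release_layout = GL_LAYOUT_TRANSFER_SRC_EXT;
    glGenSemaphoresEXT(1, &sem);
    glImportSemaphoreFdEXT(sem, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);
    /* Block GL work until the other API signals, then read the texture. */
    glWaitSemaphoreEXT(sem, 0, NULL, 1, &shared_tex, &acquire_layout);
    /* ... draw calls sampling shared_tex go here ... */
    glSignalSemaphoreEXT(sem, 0, NULL, 1, &shared_tex, &release_layout);
}
#endif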
#ifndef GL_EXT_semaphore_win32
#define GL_EXT_semaphore_win32 1
#define GL_HANDLE_TYPE_D3D12_FENCE_EXT 0x9594
#define GL_D3D12_FENCE_VALUE_EXT 0x9595
typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32HANDLEEXTPROC) (GLuint semaphore, GLenum handleType, void *handle);
typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32NAMEEXTPROC) (GLuint semaphore, GLenum handleType, const void *name);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glImportSemaphoreWin32HandleEXT (GLuint semaphore, GLenum handleType, void *handle);
GL_APICALL void GL_APIENTRY glImportSemaphoreWin32NameEXT (GLuint semaphore, GLenum handleType, const void *name);
#endif
#endif /* GL_EXT_semaphore_win32 */
#ifndef GL_EXT_separate_depth_stencil
#define GL_EXT_separate_depth_stencil 1
#endif /* GL_EXT_separate_depth_stencil */
#ifndef GL_EXT_separate_shader_objects
#define GL_EXT_separate_shader_objects 1
#define GL_ACTIVE_PROGRAM_EXT 0x8259
#define GL_VERTEX_SHADER_BIT_EXT 0x00000001
#define GL_FRAGMENT_SHADER_BIT_EXT 0x00000002
#define GL_ALL_SHADER_BITS_EXT 0xFFFFFFFF
#define GL_PROGRAM_SEPARABLE_EXT 0x8258
#define GL_PROGRAM_PIPELINE_BINDING_EXT 0x825A
typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMEXTPROC) (GLuint pipeline, GLuint program);
typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEEXTPROC) (GLuint pipeline);
typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVEXTPROC) (GLenum type, GLsizei count, const GLchar **strings);
typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESEXTPROC) (GLsizei n, const GLuint *pipelines);
typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESEXTPROC) (GLsizei n, GLuint *pipelines);
typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVEXTPROC) (GLuint pipeline, GLenum pname, GLint *params);
typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEEXTPROC) (GLuint pipeline);
typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIEXTPROC) (GLuint program, GLenum pname, GLint value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FEXTPROC) (GLuint program, GLint location, GLfloat v0);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IEXTPROC) (GLuint program, GLint location, GLint v0);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESEXTPROC) (GLuint pipeline, GLbitfield stages, GLuint program);
typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEEXTPROC) (GLuint pipeline);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIEXTPROC) (GLuint program, GLint location, GLuint v0);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glActiveShaderProgramEXT (GLuint pipeline, GLuint program);
GL_APICALL void GL_APIENTRY glBindProgramPipelineEXT (GLuint pipeline);
GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramvEXT (GLenum type, GLsizei count, const GLchar **strings);
GL_APICALL void GL_APIENTRY glDeleteProgramPipelinesEXT (GLsizei n, const GLuint *pipelines);
GL_APICALL void GL_APIENTRY glGenProgramPipelinesEXT (GLsizei n, GLuint *pipelines);
GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLogEXT (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
GL_APICALL void GL_APIENTRY glGetProgramPipelineivEXT (GLuint pipeline, GLenum pname, GLint *params);
GL_APICALL GLboolean GL_APIENTRY glIsProgramPipelineEXT (GLuint pipeline);
GL_APICALL void GL_APIENTRY glProgramParameteriEXT (GLuint program, GLenum pname, GLint value);
GL_APICALL void GL_APIENTRY glProgramUniform1fEXT (GLuint program, GLint location, GLfloat v0);
GL_APICALL void GL_APIENTRY glProgramUniform1fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniform1iEXT (GLuint program, GLint location, GLint v0);
GL_APICALL void GL_APIENTRY glProgramUniform1ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glProgramUniform2fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1);
GL_APICALL void GL_APIENTRY glProgramUniform2fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniform2iEXT (GLuint program, GLint location, GLint v0, GLint v1);
GL_APICALL void GL_APIENTRY glProgramUniform2ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glProgramUniform3fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
GL_APICALL void GL_APIENTRY glProgramUniform3fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniform3iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
GL_APICALL void GL_APIENTRY glProgramUniform3ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glProgramUniform4fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
GL_APICALL void GL_APIENTRY glProgramUniform4fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniform4iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
GL_APICALL void GL_APIENTRY glProgramUniform4ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUseProgramStagesEXT (GLuint pipeline, GLbitfield stages, GLuint program);
GL_APICALL void GL_APIENTRY glValidateProgramPipelineEXT (GLuint pipeline);
GL_APICALL void GL_APIENTRY glProgramUniform1uiEXT (GLuint program, GLint location, GLuint v0);
GL_APICALL void GL_APIENTRY glProgramUniform2uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1);
GL_APICALL void GL_APIENTRY glProgramUniform3uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
GL_APICALL void GL_APIENTRY glProgramUniform4uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
GL_APICALL void GL_APIENTRY glProgramUniform1uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
GL_APICALL void GL_APIENTRY glProgramUniform2uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
GL_APICALL void GL_APIENTRY glProgramUniform3uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
GL_APICALL void GL_APIENTRY glProgramUniform4uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
#endif
#endif /* GL_EXT_separate_shader_objects */
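/*
 * Illustrative sketch (not part of the Khronos header): building a program
 * pipeline with GL_EXT_separate_shader_objects. The shader sources and the
 * uniform location 0 are hypothetical; error checking is omitted.
 */
#ifdef GL_GLEXT_PROTOTYPES
static GLuint example_build_pipeline(const GLchar *vs_src, const GLchar *fs_src)
{
    GLuint pipe = 0;
    /* Compile and link each stage as a separable program in one call. */
    GLuint vs = glCreateShaderProgramvEXT(GL_VERTEX_SHADER, 1, &vs_src);
    GLuint fs = glCreateShaderProgramvEXT(GL_FRAGMENT_SHADER, 1, &fs_src);
    glGenProgramPipelinesEXT(1, &pipe);
    glUseProgramStagesEXT(pipe, GL_VERTEX_SHADER_BIT_EXT, vs);
    glUseProgramStagesEXT(pipe, GL_FRAGMENT_SHADER_BIT_EXT, fs);
    glBindProgramPipelineEXT(pipe);
    /* Uniform updates name the program explicitly, so no glUseProgram. */
    glProgramUniform1fEXT(fs, 0, 1.0f);
    return pipe;
}
#endif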
#ifndef GL_EXT_shader_framebuffer_fetch
#define GL_EXT_shader_framebuffer_fetch 1
#define GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT 0x8A52
#endif /* GL_EXT_shader_framebuffer_fetch */
#ifndef GL_EXT_shader_framebuffer_fetch_non_coherent
#define GL_EXT_shader_framebuffer_fetch_non_coherent 1
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFETCHBARRIEREXTPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferFetchBarrierEXT (void);
#endif
#endif /* GL_EXT_shader_framebuffer_fetch_non_coherent */
#ifndef GL_EXT_shader_group_vote
#define GL_EXT_shader_group_vote 1
#endif /* GL_EXT_shader_group_vote */
#ifndef GL_EXT_shader_implicit_conversions
#define GL_EXT_shader_implicit_conversions 1
#endif /* GL_EXT_shader_implicit_conversions */
#ifndef GL_EXT_shader_integer_mix
#define GL_EXT_shader_integer_mix 1
#endif /* GL_EXT_shader_integer_mix */
#ifndef GL_EXT_shader_io_blocks
#define GL_EXT_shader_io_blocks 1
#endif /* GL_EXT_shader_io_blocks */
#ifndef GL_EXT_shader_non_constant_global_initializers
#define GL_EXT_shader_non_constant_global_initializers 1
#endif /* GL_EXT_shader_non_constant_global_initializers */
#ifndef GL_EXT_shader_pixel_local_storage
#define GL_EXT_shader_pixel_local_storage 1
#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT 0x8F63
#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_SIZE_EXT 0x8F67
#define GL_SHADER_PIXEL_LOCAL_STORAGE_EXT 0x8F64
#endif /* GL_EXT_shader_pixel_local_storage */
#ifndef GL_EXT_shader_pixel_local_storage2
#define GL_EXT_shader_pixel_local_storage2 1
#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_FAST_SIZE_EXT 0x9650
#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_SIZE_EXT 0x9651
#define GL_FRAMEBUFFER_INCOMPLETE_INSUFFICIENT_SHADER_COMBINED_LOCAL_STORAGE_EXT 0x9652
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target, GLsizei size);
typedef GLsizei (GL_APIENTRYP PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target);
typedef void (GL_APIENTRYP PFNGLCLEARPIXELLOCALSTORAGEUIEXTPROC) (GLsizei offset, GLsizei n, const GLuint *values);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferPixelLocalStorageSizeEXT (GLuint target, GLsizei size);
GL_APICALL GLsizei GL_APIENTRY glGetFramebufferPixelLocalStorageSizeEXT (GLuint target);
GL_APICALL void GL_APIENTRY glClearPixelLocalStorageuiEXT (GLsizei offset, GLsizei n, const GLuint *values);
#endif
#endif /* GL_EXT_shader_pixel_local_storage2 */
#ifndef GL_EXT_shader_samples_identical
#define GL_EXT_shader_samples_identical 1
#endif /* GL_EXT_shader_samples_identical */
#ifndef GL_EXT_shader_texture_lod
#define GL_EXT_shader_texture_lod 1
#endif /* GL_EXT_shader_texture_lod */
#ifndef GL_EXT_shadow_samplers
#define GL_EXT_shadow_samplers 1
#define GL_TEXTURE_COMPARE_MODE_EXT 0x884C
#define GL_TEXTURE_COMPARE_FUNC_EXT 0x884D
#define GL_COMPARE_REF_TO_TEXTURE_EXT 0x884E
#define GL_SAMPLER_2D_SHADOW_EXT 0x8B62
#endif /* GL_EXT_shadow_samplers */
#ifndef GL_EXT_sparse_texture
#define GL_EXT_sparse_texture 1
#define GL_TEXTURE_SPARSE_EXT 0x91A6
#define GL_VIRTUAL_PAGE_SIZE_INDEX_EXT 0x91A7
#define GL_NUM_SPARSE_LEVELS_EXT 0x91AA
#define GL_NUM_VIRTUAL_PAGE_SIZES_EXT 0x91A8
#define GL_VIRTUAL_PAGE_SIZE_X_EXT 0x9195
#define GL_VIRTUAL_PAGE_SIZE_Y_EXT 0x9196
#define GL_VIRTUAL_PAGE_SIZE_Z_EXT 0x9197
#define GL_TEXTURE_2D_ARRAY 0x8C1A
#define GL_TEXTURE_3D 0x806F
#define GL_MAX_SPARSE_TEXTURE_SIZE_EXT 0x9198
#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_EXT 0x9199
#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_EXT 0x919A
#define GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT 0x91A9
typedef void (GL_APIENTRYP PFNGLTEXPAGECOMMITMENTEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexPageCommitmentEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
#endif
#endif /* GL_EXT_sparse_texture */
#ifndef GL_EXT_sparse_texture2
#define GL_EXT_sparse_texture2 1
#endif /* GL_EXT_sparse_texture2 */
#ifndef GL_EXT_tessellation_point_size
#define GL_EXT_tessellation_point_size 1
#endif /* GL_EXT_tessellation_point_size */
#ifndef GL_EXT_tessellation_shader
#define GL_EXT_tessellation_shader 1
#define GL_PATCHES_EXT 0x000E
#define GL_PATCH_VERTICES_EXT 0x8E72
#define GL_TESS_CONTROL_OUTPUT_VERTICES_EXT 0x8E75
#define GL_TESS_GEN_MODE_EXT 0x8E76
#define GL_TESS_GEN_SPACING_EXT 0x8E77
#define GL_TESS_GEN_VERTEX_ORDER_EXT 0x8E78
#define GL_TESS_GEN_POINT_MODE_EXT 0x8E79
#define GL_ISOLINES_EXT 0x8E7A
#define GL_QUADS_EXT 0x0007
#define GL_FRACTIONAL_ODD_EXT 0x8E7B
#define GL_FRACTIONAL_EVEN_EXT 0x8E7C
#define GL_MAX_PATCH_VERTICES_EXT 0x8E7D
#define GL_MAX_TESS_GEN_LEVEL_EXT 0x8E7E
#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E7F
#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E80
#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_EXT 0x8E81
#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_EXT 0x8E82
#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_EXT 0x8E83
#define GL_MAX_TESS_PATCH_COMPONENTS_EXT 0x8E84
#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_EXT 0x8E85
#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_EXT 0x8E86
#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_EXT 0x8E89
#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_EXT 0x8E8A
#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT 0x886C
#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT 0x886D
#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E1E
#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E1F
#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_EXT 0x92CD
#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_EXT 0x92CE
#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_EXT 0x92D3
#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_EXT 0x92D4
#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_EXT 0x90CB
#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_EXT 0x90CC
#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_EXT 0x90D8
#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_EXT 0x90D9
#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221
#define GL_IS_PER_PATCH_EXT 0x92E7
#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_EXT 0x9307
#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_EXT 0x9308
#define GL_TESS_CONTROL_SHADER_EXT 0x8E88
#define GL_TESS_EVALUATION_SHADER_EXT 0x8E87
#define GL_TESS_CONTROL_SHADER_BIT_EXT 0x00000008
#define GL_TESS_EVALUATION_SHADER_BIT_EXT 0x00000010
typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIEXTPROC) (GLenum pname, GLint value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPatchParameteriEXT (GLenum pname, GLint value);
#endif
#endif /* GL_EXT_tessellation_shader */
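/*
 * Illustrative sketch (not part of the Khronos header): submitting patch
 * primitives with GL_EXT_tessellation_shader. Assumes a bound pipeline with
 * tessellation control/evaluation stages expecting 3-point patches.
 */
#ifdef GL_GLEXT_PROTOTYPES
static void example_draw_patches(GLsizei vertex_count)
{
    /* Each group of 3 vertices forms one patch fed to the control shader. */
    glPatchParameteriEXT(GL_PATCH_VERTICES_EXT, 3);
    glDrawArrays(GL_PATCHES_EXT, 0, vertex_count);
}
#endif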
#ifndef GL_EXT_texture_border_clamp
#define GL_EXT_texture_border_clamp 1
#define GL_TEXTURE_BORDER_COLOR_EXT 0x1004
#define GL_CLAMP_TO_BORDER_EXT 0x812D
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, const GLuint *params);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, GLuint *params);
typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, const GLint *param);
typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, const GLuint *param);
typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, GLuint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexParameterIivEXT (GLenum target, GLenum pname, const GLint *params);
GL_APICALL void GL_APIENTRY glTexParameterIuivEXT (GLenum target, GLenum pname, const GLuint *params);
GL_APICALL void GL_APIENTRY glGetTexParameterIivEXT (GLenum target, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetTexParameterIuivEXT (GLenum target, GLenum pname, GLuint *params);
GL_APICALL void GL_APIENTRY glSamplerParameterIivEXT (GLuint sampler, GLenum pname, const GLint *param);
GL_APICALL void GL_APIENTRY glSamplerParameterIuivEXT (GLuint sampler, GLenum pname, const GLuint *param);
GL_APICALL void GL_APIENTRY glGetSamplerParameterIivEXT (GLuint sampler, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivEXT (GLuint sampler, GLenum pname, GLuint *params);
#endif
#endif /* GL_EXT_texture_border_clamp */
#ifndef GL_EXT_texture_buffer
#define GL_EXT_texture_buffer 1
#define GL_TEXTURE_BUFFER_EXT 0x8C2A
#define GL_TEXTURE_BUFFER_BINDING_EXT 0x8C2A
#define GL_MAX_TEXTURE_BUFFER_SIZE_EXT 0x8C2B
#define GL_TEXTURE_BINDING_BUFFER_EXT 0x8C2C
#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT 0x8C2D
#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_EXT 0x919F
#define GL_SAMPLER_BUFFER_EXT 0x8DC2
#define GL_INT_SAMPLER_BUFFER_EXT 0x8DD0
#define GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT 0x8DD8
#define GL_IMAGE_BUFFER_EXT 0x9051
#define GL_INT_IMAGE_BUFFER_EXT 0x905C
#define GL_UNSIGNED_INT_IMAGE_BUFFER_EXT 0x9067
#define GL_TEXTURE_BUFFER_OFFSET_EXT 0x919D
#define GL_TEXTURE_BUFFER_SIZE_EXT 0x919E
typedef void (GL_APIENTRYP PFNGLTEXBUFFEREXTPROC) (GLenum target, GLenum internalformat, GLuint buffer);
typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEEXTPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexBufferEXT (GLenum target, GLenum internalformat, GLuint buffer);
GL_APICALL void GL_APIENTRY glTexBufferRangeEXT (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
#endif
#endif /* GL_EXT_texture_buffer */
#ifndef GL_EXT_texture_compression_astc_decode_mode
#define GL_EXT_texture_compression_astc_decode_mode 1
#define GL_TEXTURE_ASTC_DECODE_PRECISION_EXT 0x8F69
#endif /* GL_EXT_texture_compression_astc_decode_mode */
#ifndef GL_EXT_texture_compression_bptc
#define GL_EXT_texture_compression_bptc 1
#define GL_COMPRESSED_RGBA_BPTC_UNORM_EXT 0x8E8C
#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT 0x8E8D
#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT 0x8E8E
#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT 0x8E8F
#endif /* GL_EXT_texture_compression_bptc */
#ifndef GL_EXT_texture_compression_dxt1
#define GL_EXT_texture_compression_dxt1 1
#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
#endif /* GL_EXT_texture_compression_dxt1 */
#ifndef GL_EXT_texture_compression_rgtc
#define GL_EXT_texture_compression_rgtc 1
#define GL_COMPRESSED_RED_RGTC1_EXT 0x8DBB
#define GL_COMPRESSED_SIGNED_RED_RGTC1_EXT 0x8DBC
#define GL_COMPRESSED_RED_GREEN_RGTC2_EXT 0x8DBD
#define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT 0x8DBE
#endif /* GL_EXT_texture_compression_rgtc */
#ifndef GL_EXT_texture_compression_s3tc
#define GL_EXT_texture_compression_s3tc 1
#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
#endif /* GL_EXT_texture_compression_s3tc */
#ifndef GL_EXT_texture_compression_s3tc_srgb
#define GL_EXT_texture_compression_s3tc_srgb 1
#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT 0x8C4C
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT 0x8C4D
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT 0x8C4E
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT 0x8C4F
#endif /* GL_EXT_texture_compression_s3tc_srgb */
#ifndef GL_EXT_texture_cube_map_array
#define GL_EXT_texture_cube_map_array 1
#define GL_TEXTURE_CUBE_MAP_ARRAY_EXT 0x9009
#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_EXT 0x900A
#define GL_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_EXT 0x900D
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900E
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900F
#define GL_IMAGE_CUBE_MAP_ARRAY_EXT 0x9054
#define GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x905F
#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x906A
#endif /* GL_EXT_texture_cube_map_array */
#ifndef GL_EXT_texture_filter_anisotropic
#define GL_EXT_texture_filter_anisotropic 1
#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
#endif /* GL_EXT_texture_filter_anisotropic */
#ifndef GL_EXT_texture_filter_minmax
#define GL_EXT_texture_filter_minmax 1
#define GL_TEXTURE_REDUCTION_MODE_EXT 0x9366
#define GL_WEIGHTED_AVERAGE_EXT 0x9367
#endif /* GL_EXT_texture_filter_minmax */
#ifndef GL_EXT_texture_format_BGRA8888
#define GL_EXT_texture_format_BGRA8888 1
#endif /* GL_EXT_texture_format_BGRA8888 */
#ifndef GL_EXT_texture_format_sRGB_override
#define GL_EXT_texture_format_sRGB_override 1
#define GL_TEXTURE_FORMAT_SRGB_OVERRIDE_EXT 0x8FBF
#endif /* GL_EXT_texture_format_sRGB_override */
#ifndef GL_EXT_texture_mirror_clamp_to_edge
#define GL_EXT_texture_mirror_clamp_to_edge 1
#define GL_MIRROR_CLAMP_TO_EDGE_EXT 0x8743
#endif /* GL_EXT_texture_mirror_clamp_to_edge */
#ifndef GL_EXT_texture_norm16
#define GL_EXT_texture_norm16 1
#define GL_R16_EXT 0x822A
#define GL_RG16_EXT 0x822C
#define GL_RGBA16_EXT 0x805B
#define GL_RGB16_EXT 0x8054
#define GL_RGB16_SNORM_EXT 0x8F9A
#endif /* GL_EXT_texture_norm16 */
#ifndef GL_EXT_texture_query_lod
#define GL_EXT_texture_query_lod 1
#endif /* GL_EXT_texture_query_lod */
#ifndef GL_EXT_texture_rg
#define GL_EXT_texture_rg 1
#define GL_RED_EXT 0x1903
#define GL_RG_EXT 0x8227
#define GL_R8_EXT 0x8229
#define GL_RG8_EXT 0x822B
#endif /* GL_EXT_texture_rg */
#ifndef GL_EXT_texture_sRGB_R8
#define GL_EXT_texture_sRGB_R8 1
#define GL_SR8_EXT 0x8FBD
#endif /* GL_EXT_texture_sRGB_R8 */
#ifndef GL_EXT_texture_sRGB_RG8
#define GL_EXT_texture_sRGB_RG8 1
#define GL_SRG8_EXT 0x8FBE
#endif /* GL_EXT_texture_sRGB_RG8 */
#ifndef GL_EXT_texture_sRGB_decode
#define GL_EXT_texture_sRGB_decode 1
#define GL_TEXTURE_SRGB_DECODE_EXT 0x8A48
#define GL_DECODE_EXT 0x8A49
#define GL_SKIP_DECODE_EXT 0x8A4A
#endif /* GL_EXT_texture_sRGB_decode */
#ifndef GL_EXT_texture_shadow_lod
#define GL_EXT_texture_shadow_lod 1
#endif /* GL_EXT_texture_shadow_lod */
#ifndef GL_EXT_texture_storage
#define GL_EXT_texture_storage 1
#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F
#define GL_ALPHA8_EXT 0x803C
#define GL_LUMINANCE8_EXT 0x8040
#define GL_LUMINANCE8_ALPHA8_EXT 0x8045
#define GL_RGBA32F_EXT 0x8814
#define GL_RGB32F_EXT 0x8815
#define GL_ALPHA32F_EXT 0x8816
#define GL_LUMINANCE32F_EXT 0x8818
#define GL_LUMINANCE_ALPHA32F_EXT 0x8819
#define GL_ALPHA16F_EXT 0x881C
#define GL_LUMINANCE16F_EXT 0x881E
#define GL_LUMINANCE_ALPHA16F_EXT 0x881F
#define GL_R32F_EXT 0x822E
#define GL_RG32F_EXT 0x8230
typedef void (GL_APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
GL_APICALL void GL_APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
GL_APICALL void GL_APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
GL_APICALL void GL_APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
#endif
#endif /* GL_EXT_texture_storage */
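/*
 * Illustrative sketch (not part of the Khronos header): immutable texture
 * allocation with GL_EXT_texture_storage. GL_RGBA32F_EXT is defined above;
 * any other sized internal format works the same way.
 */
#ifdef GL_GLEXT_PROTOTYPES
static GLuint example_immutable_texture(GLsizei width, GLsizei height)
{
    GLuint tex = 0;
    GLsizei levels = 1;
    /* Full mip chain: floor(log2(max(width, height))) + 1 levels. */
    while ((width >> levels) > 0 || (height >> levels) > 0)
        levels++;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    /* Storage is fixed after this call; upload via glTexSubImage2D. */
    glTexStorage2DEXT(GL_TEXTURE_2D, levels, GL_RGBA32F_EXT, width, height);
    return tex;
}
#endif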
#ifndef GL_EXT_texture_storage_compression
#define GL_EXT_texture_storage_compression 1
#define GL_NUM_SURFACE_COMPRESSION_FIXED_RATES_EXT 0x8F6E
#define GL_SURFACE_COMPRESSION_FIXED_RATE_1BPC_EXT 0x96C4
#define GL_SURFACE_COMPRESSION_FIXED_RATE_2BPC_EXT 0x96C5
#define GL_SURFACE_COMPRESSION_FIXED_RATE_3BPC_EXT 0x96C6
#define GL_SURFACE_COMPRESSION_FIXED_RATE_4BPC_EXT 0x96C7
#define GL_SURFACE_COMPRESSION_FIXED_RATE_5BPC_EXT 0x96C8
#define GL_SURFACE_COMPRESSION_FIXED_RATE_6BPC_EXT 0x96C9
#define GL_SURFACE_COMPRESSION_FIXED_RATE_7BPC_EXT 0x96CA
#define GL_SURFACE_COMPRESSION_FIXED_RATE_8BPC_EXT 0x96CB
#define GL_SURFACE_COMPRESSION_FIXED_RATE_9BPC_EXT 0x96CC
#define GL_SURFACE_COMPRESSION_FIXED_RATE_10BPC_EXT 0x96CD
#define GL_SURFACE_COMPRESSION_FIXED_RATE_11BPC_EXT 0x96CE
#define GL_SURFACE_COMPRESSION_FIXED_RATE_12BPC_EXT 0x96CF
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEATTRIBS2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, const GLint* attrib_list);
typedef void (GL_APIENTRYP PFNGLTEXSTORAGEATTRIBS3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, const GLint* attrib_list);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexStorageAttribs2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, const GLint* attrib_list);
GL_APICALL void GL_APIENTRY glTexStorageAttribs3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, const GLint* attrib_list);
#endif
#endif /* GL_EXT_texture_storage_compression */
#ifndef GL_EXT_texture_type_2_10_10_10_REV
#define GL_EXT_texture_type_2_10_10_10_REV 1
#define GL_UNSIGNED_INT_2_10_10_10_REV_EXT 0x8368
#endif /* GL_EXT_texture_type_2_10_10_10_REV */
#ifndef GL_EXT_texture_view
#define GL_EXT_texture_view 1
#define GL_TEXTURE_VIEW_MIN_LEVEL_EXT 0x82DB
#define GL_TEXTURE_VIEW_NUM_LEVELS_EXT 0x82DC
#define GL_TEXTURE_VIEW_MIN_LAYER_EXT 0x82DD
#define GL_TEXTURE_VIEW_NUM_LAYERS_EXT 0x82DE
typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWEXTPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTextureViewEXT (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
#endif
#endif /* GL_EXT_texture_view */
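/*
 * Usage sketch (not part of the Khronos header): creating a view of two
 * mip levels of an existing texture. "view" and "orig" are hypothetical
 * names; "orig" must have immutable storage and a view-compatible
 * internal format.
 *
 *   GLuint view;
 *   glGenTextures(1, &view);
 *   glTextureViewEXT(view, GL_TEXTURE_2D, orig, GL_RGBA8,
 *                    2, 2,   // minlevel, numlevels
 *                    0, 1);  // minlayer, numlayers (1 layer for 2D targets)
 */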
#ifndef GL_EXT_unpack_subimage
#define GL_EXT_unpack_subimage 1
#define GL_UNPACK_ROW_LENGTH_EXT 0x0CF2
#define GL_UNPACK_SKIP_ROWS_EXT 0x0CF3
#define GL_UNPACK_SKIP_PIXELS_EXT 0x0CF4
#endif /* GL_EXT_unpack_subimage */
#ifndef GL_EXT_win32_keyed_mutex
#define GL_EXT_win32_keyed_mutex 1
typedef GLboolean (GL_APIENTRYP PFNGLACQUIREKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key, GLuint timeout);
typedef GLboolean (GL_APIENTRYP PFNGLRELEASEKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLboolean GL_APIENTRY glAcquireKeyedMutexWin32EXT (GLuint memory, GLuint64 key, GLuint timeout);
GL_APICALL GLboolean GL_APIENTRY glReleaseKeyedMutexWin32EXT (GLuint memory, GLuint64 key);
#endif
#endif /* GL_EXT_win32_keyed_mutex */
#ifndef GL_EXT_window_rectangles
#define GL_EXT_window_rectangles 1
#define GL_INCLUSIVE_EXT 0x8F10
#define GL_EXCLUSIVE_EXT 0x8F11
#define GL_WINDOW_RECTANGLE_EXT 0x8F12
#define GL_WINDOW_RECTANGLE_MODE_EXT 0x8F13
#define GL_MAX_WINDOW_RECTANGLES_EXT 0x8F14
#define GL_NUM_WINDOW_RECTANGLES_EXT 0x8F15
typedef void (GL_APIENTRYP PFNGLWINDOWRECTANGLESEXTPROC) (GLenum mode, GLsizei count, const GLint *box);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glWindowRectanglesEXT (GLenum mode, GLsizei count, const GLint *box);
#endif
#endif /* GL_EXT_window_rectangles */
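/*
 * Usage sketch (not part of the Khronos header): restricting rendering
 * to two scissor-like rectangles. Each box is x, y, width, height in
 * window coordinates; GL_INCLUSIVE_EXT keeps fragments that fall inside
 * any listed box, GL_EXCLUSIVE_EXT discards them instead.
 *
 *   const GLint boxes[] = {
 *       0,   0,   128, 128,
 *       256, 256, 64,  64
 *   };
 *   glWindowRectanglesEXT(GL_INCLUSIVE_EXT, 2, boxes);
 */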
#ifndef GL_FJ_shader_binary_GCCSO
#define GL_FJ_shader_binary_GCCSO 1
#define GL_GCCSO_SHADER_BINARY_FJ 0x9260
#endif /* GL_FJ_shader_binary_GCCSO */
#ifndef GL_IMG_bindless_texture
#define GL_IMG_bindless_texture 1
typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLEIMGPROC) (GLuint texture);
typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLEIMGPROC) (GLuint texture, GLuint sampler);
typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64IMGPROC) (GLint location, GLuint64 value);
typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VIMGPROC) (GLint location, GLsizei count, const GLuint64 *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64IMGPROC) (GLuint program, GLint location, GLuint64 value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VIMGPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleIMG (GLuint texture);
GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleIMG (GLuint texture, GLuint sampler);
GL_APICALL void GL_APIENTRY glUniformHandleui64IMG (GLint location, GLuint64 value);
GL_APICALL void GL_APIENTRY glUniformHandleui64vIMG (GLint location, GLsizei count, const GLuint64 *value);
GL_APICALL void GL_APIENTRY glProgramUniformHandleui64IMG (GLuint program, GLint location, GLuint64 value);
GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vIMG (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
#endif
#endif /* GL_IMG_bindless_texture */
#ifndef GL_IMG_framebuffer_downsample
#define GL_IMG_framebuffer_downsample 1
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_AND_DOWNSAMPLE_IMG 0x913C
#define GL_NUM_DOWNSAMPLE_SCALES_IMG 0x913D
#define GL_DOWNSAMPLE_SCALES_IMG 0x913E
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SCALE_IMG 0x913F
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferTexture2DDownsampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
GL_APICALL void GL_APIENTRY glFramebufferTextureLayerDownsampleIMG (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
#endif
#endif /* GL_IMG_framebuffer_downsample */
#ifndef GL_IMG_multisampled_render_to_texture
#define GL_IMG_multisampled_render_to_texture 1
#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134
#define GL_MAX_SAMPLES_IMG 0x9135
#define GL_TEXTURE_SAMPLES_IMG 0x9136
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleIMG (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
#endif
#endif /* GL_IMG_multisampled_render_to_texture */
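/*
 * Usage sketch (not part of the Khronos header): attaching a texture so
 * that rasterization is multisampled but the result is resolved into the
 * single-sample texture at the end of the render pass. "fbo" and "tex"
 * are hypothetical; the sample count must not exceed GL_MAX_SAMPLES_IMG.
 *
 *   glBindFramebuffer(GL_FRAMEBUFFER, fbo);
 *   glFramebufferTexture2DMultisampleIMG(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
 *                                        GL_TEXTURE_2D, tex, 0, 4);
 */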
#ifndef GL_IMG_program_binary
#define GL_IMG_program_binary 1
#define GL_SGX_PROGRAM_BINARY_IMG 0x9130
#endif /* GL_IMG_program_binary */
#ifndef GL_IMG_read_format
#define GL_IMG_read_format 1
#define GL_BGRA_IMG 0x80E1
#define GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG 0x8365
#endif /* GL_IMG_read_format */
#ifndef GL_IMG_shader_binary
#define GL_IMG_shader_binary 1
#define GL_SGX_BINARY_IMG 0x8C0A
#endif /* GL_IMG_shader_binary */
#ifndef GL_IMG_texture_compression_pvrtc
#define GL_IMG_texture_compression_pvrtc 1
#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
#endif /* GL_IMG_texture_compression_pvrtc */
#ifndef GL_IMG_texture_compression_pvrtc2
#define GL_IMG_texture_compression_pvrtc2 1
#define GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG 0x9137
#define GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG 0x9138
#endif /* GL_IMG_texture_compression_pvrtc2 */
#ifndef GL_IMG_texture_filter_cubic
#define GL_IMG_texture_filter_cubic 1
#define GL_CUBIC_IMG 0x9139
#define GL_CUBIC_MIPMAP_NEAREST_IMG 0x913A
#define GL_CUBIC_MIPMAP_LINEAR_IMG 0x913B
#endif /* GL_IMG_texture_filter_cubic */
#ifndef GL_INTEL_blackhole_render
#define GL_INTEL_blackhole_render 1
#define GL_BLACKHOLE_RENDER_INTEL 0x83FC
#endif /* GL_INTEL_blackhole_render */
#ifndef GL_INTEL_conservative_rasterization
#define GL_INTEL_conservative_rasterization 1
#define GL_CONSERVATIVE_RASTERIZATION_INTEL 0x83FE
#endif /* GL_INTEL_conservative_rasterization */
#ifndef GL_INTEL_framebuffer_CMAA
#define GL_INTEL_framebuffer_CMAA 1
typedef void (GL_APIENTRYP PFNGLAPPLYFRAMEBUFFERATTACHMENTCMAAINTELPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glApplyFramebufferAttachmentCMAAINTEL (void);
#endif
#endif /* GL_INTEL_framebuffer_CMAA */
#ifndef GL_INTEL_performance_query
#define GL_INTEL_performance_query 1
#define GL_PERFQUERY_SINGLE_CONTEXT_INTEL 0x00000000
#define GL_PERFQUERY_GLOBAL_CONTEXT_INTEL 0x00000001
#define GL_PERFQUERY_WAIT_INTEL 0x83FB
#define GL_PERFQUERY_FLUSH_INTEL 0x83FA
#define GL_PERFQUERY_DONOT_FLUSH_INTEL 0x83F9
#define GL_PERFQUERY_COUNTER_EVENT_INTEL 0x94F0
#define GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL 0x94F1
#define GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL 0x94F2
#define GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL 0x94F3
#define GL_PERFQUERY_COUNTER_RAW_INTEL 0x94F4
#define GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL 0x94F5
#define GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL 0x94F8
#define GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL 0x94F9
#define GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL 0x94FA
#define GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL 0x94FB
#define GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL 0x94FC
#define GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL 0x94FD
#define GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL 0x94FE
#define GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL 0x94FF
#define GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL 0x9500
typedef void (GL_APIENTRYP PFNGLBEGINPERFQUERYINTELPROC) (GLuint queryHandle);
typedef void (GL_APIENTRYP PFNGLCREATEPERFQUERYINTELPROC) (GLuint queryId, GLuint *queryHandle);
typedef void (GL_APIENTRYP PFNGLDELETEPERFQUERYINTELPROC) (GLuint queryHandle);
typedef void (GL_APIENTRYP PFNGLENDPERFQUERYINTELPROC) (GLuint queryHandle);
typedef void (GL_APIENTRYP PFNGLGETFIRSTPERFQUERYIDINTELPROC) (GLuint *queryId);
typedef void (GL_APIENTRYP PFNGLGETNEXTPERFQUERYIDINTELPROC) (GLuint queryId, GLuint *nextQueryId);
typedef void (GL_APIENTRYP PFNGLGETPERFCOUNTERINFOINTELPROC) (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
typedef void (GL_APIENTRYP PFNGLGETPERFQUERYDATAINTELPROC) (GLuint queryHandle, GLuint flags, GLsizei dataSize, void *data, GLuint *bytesWritten);
typedef void (GL_APIENTRYP PFNGLGETPERFQUERYIDBYNAMEINTELPROC) (GLchar *queryName, GLuint *queryId);
typedef void (GL_APIENTRYP PFNGLGETPERFQUERYINFOINTELPROC) (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBeginPerfQueryINTEL (GLuint queryHandle);
GL_APICALL void GL_APIENTRY glCreatePerfQueryINTEL (GLuint queryId, GLuint *queryHandle);
GL_APICALL void GL_APIENTRY glDeletePerfQueryINTEL (GLuint queryHandle);
GL_APICALL void GL_APIENTRY glEndPerfQueryINTEL (GLuint queryHandle);
GL_APICALL void GL_APIENTRY glGetFirstPerfQueryIdINTEL (GLuint *queryId);
GL_APICALL void GL_APIENTRY glGetNextPerfQueryIdINTEL (GLuint queryId, GLuint *nextQueryId);
GL_APICALL void GL_APIENTRY glGetPerfCounterInfoINTEL (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
GL_APICALL void GL_APIENTRY glGetPerfQueryDataINTEL (GLuint queryHandle, GLuint flags, GLsizei dataSize, void *data, GLuint *bytesWritten);
GL_APICALL void GL_APIENTRY glGetPerfQueryIdByNameINTEL (GLchar *queryName, GLuint *queryId);
GL_APICALL void GL_APIENTRY glGetPerfQueryInfoINTEL (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
#endif
#endif /* GL_INTEL_performance_query */
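/*
 * Usage sketch (not part of the Khronos header): enumerating the
 * available INTEL performance queries. The fixed 256-byte name buffer is
 * a simplifying assumption (the real maximum can be queried via
 * GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL); a returned id of 0 is
 * treated here as the end of the iteration.
 *
 *   GLuint queryId = 0;
 *   glGetFirstPerfQueryIdINTEL(&queryId);
 *   while (queryId != 0) {
 *       GLchar name[256];
 *       GLuint dataSize, nCounters, nInstances, caps;
 *       glGetPerfQueryInfoINTEL(queryId, (GLuint)sizeof(name), name,
 *                               &dataSize, &nCounters, &nInstances, &caps);
 *       glGetNextPerfQueryIdINTEL(queryId, &queryId);
 *   }
 */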
#ifndef GL_MESA_bgra
#define GL_MESA_bgra 1
#define GL_BGR_EXT 0x80E0
#endif /* GL_MESA_bgra */
#ifndef GL_MESA_framebuffer_flip_x
#define GL_MESA_framebuffer_flip_x 1
#define GL_FRAMEBUFFER_FLIP_X_MESA 0x8BBC
#endif /* GL_MESA_framebuffer_flip_x */
#ifndef GL_MESA_framebuffer_flip_y
#define GL_MESA_framebuffer_flip_y 1
#define GL_FRAMEBUFFER_FLIP_Y_MESA 0x8BBB
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPARAMETERIMESAPROC) (GLenum target, GLenum pname, GLint param);
typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVMESAPROC) (GLenum target, GLenum pname, GLint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferParameteriMESA (GLenum target, GLenum pname, GLint param);
GL_APICALL void GL_APIENTRY glGetFramebufferParameterivMESA (GLenum target, GLenum pname, GLint *params);
#endif
#endif /* GL_MESA_framebuffer_flip_y */
#ifndef GL_MESA_framebuffer_swap_xy
#define GL_MESA_framebuffer_swap_xy 1
#define GL_FRAMEBUFFER_SWAP_XY_MESA 0x8BBD
#endif /* GL_MESA_framebuffer_swap_xy */
#ifndef GL_MESA_program_binary_formats
#define GL_MESA_program_binary_formats 1
#define GL_PROGRAM_BINARY_FORMAT_MESA 0x875F
#endif /* GL_MESA_program_binary_formats */
#ifndef GL_MESA_shader_integer_functions
#define GL_MESA_shader_integer_functions 1
#endif /* GL_MESA_shader_integer_functions */
#ifndef GL_NVX_blend_equation_advanced_multi_draw_buffers
#define GL_NVX_blend_equation_advanced_multi_draw_buffers 1
#endif /* GL_NVX_blend_equation_advanced_multi_draw_buffers */
#ifndef GL_NV_bindless_texture
#define GL_NV_bindless_texture 1
typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLENVPROC) (GLuint texture);
typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLENVPROC) (GLuint texture, GLuint sampler);
typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTNVPROC) (GLuint64 handle);
typedef GLuint64 (GL_APIENTRYP PFNGLGETIMAGEHANDLENVPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle, GLenum access);
typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTNVPROC) (GLuint64 handle);
typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64NVPROC) (GLint location, GLuint64 value);
typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VNVPROC) (GLint location, GLsizei count, const GLuint64 *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64NVPROC) (GLuint program, GLint location, GLuint64 value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
typedef GLboolean (GL_APIENTRYP PFNGLISIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleNV (GLuint texture);
GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleNV (GLuint texture, GLuint sampler);
GL_APICALL void GL_APIENTRY glMakeTextureHandleResidentNV (GLuint64 handle);
GL_APICALL void GL_APIENTRY glMakeTextureHandleNonResidentNV (GLuint64 handle);
GL_APICALL GLuint64 GL_APIENTRY glGetImageHandleNV (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
GL_APICALL void GL_APIENTRY glMakeImageHandleResidentNV (GLuint64 handle, GLenum access);
GL_APICALL void GL_APIENTRY glMakeImageHandleNonResidentNV (GLuint64 handle);
GL_APICALL void GL_APIENTRY glUniformHandleui64NV (GLint location, GLuint64 value);
GL_APICALL void GL_APIENTRY glUniformHandleui64vNV (GLint location, GLsizei count, const GLuint64 *value);
GL_APICALL void GL_APIENTRY glProgramUniformHandleui64NV (GLuint program, GLint location, GLuint64 value);
GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
GL_APICALL GLboolean GL_APIENTRY glIsTextureHandleResidentNV (GLuint64 handle);
GL_APICALL GLboolean GL_APIENTRY glIsImageHandleResidentNV (GLuint64 handle);
#endif
#endif /* GL_NV_bindless_texture */
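/*
 * Usage sketch (not part of the Khronos header): fetching a 64-bit
 * bindless handle for a texture, making it resident, and passing it to a
 * shader uniform. "tex" and "loc" are hypothetical; the handle is only
 * valid for shader access while resident.
 *
 *   GLuint64 handle = glGetTextureHandleNV(tex);
 *   glMakeTextureHandleResidentNV(handle);
 *   glUniformHandleui64NV(loc, handle);
 *   // ... draw calls that sample through the handle ...
 *   glMakeTextureHandleNonResidentNV(handle);
 */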
#ifndef GL_NV_blend_equation_advanced
#define GL_NV_blend_equation_advanced 1
#define GL_BLEND_OVERLAP_NV 0x9281
#define GL_BLEND_PREMULTIPLIED_SRC_NV 0x9280
#define GL_BLUE_NV 0x1905
#define GL_COLORBURN_NV 0x929A
#define GL_COLORDODGE_NV 0x9299
#define GL_CONJOINT_NV 0x9284
#define GL_CONTRAST_NV 0x92A1
#define GL_DARKEN_NV 0x9297
#define GL_DIFFERENCE_NV 0x929E
#define GL_DISJOINT_NV 0x9283
#define GL_DST_ATOP_NV 0x928F
#define GL_DST_IN_NV 0x928B
#define GL_DST_NV 0x9287
#define GL_DST_OUT_NV 0x928D
#define GL_DST_OVER_NV 0x9289
#define GL_EXCLUSION_NV 0x92A0
#define GL_GREEN_NV 0x1904
#define GL_HARDLIGHT_NV 0x929B
#define GL_HARDMIX_NV 0x92A9
#define GL_HSL_COLOR_NV 0x92AF
#define GL_HSL_HUE_NV 0x92AD
#define GL_HSL_LUMINOSITY_NV 0x92B0
#define GL_HSL_SATURATION_NV 0x92AE
#define GL_INVERT_OVG_NV 0x92B4
#define GL_INVERT_RGB_NV 0x92A3
#define GL_LIGHTEN_NV 0x9298
#define GL_LINEARBURN_NV 0x92A5
#define GL_LINEARDODGE_NV 0x92A4
#define GL_LINEARLIGHT_NV 0x92A7
#define GL_MINUS_CLAMPED_NV 0x92B3
#define GL_MINUS_NV 0x929F
#define GL_MULTIPLY_NV 0x9294
#define GL_OVERLAY_NV 0x9296
#define GL_PINLIGHT_NV 0x92A8
#define GL_PLUS_CLAMPED_ALPHA_NV 0x92B2
#define GL_PLUS_CLAMPED_NV 0x92B1
#define GL_PLUS_DARKER_NV 0x9292
#define GL_PLUS_NV 0x9291
#define GL_RED_NV 0x1903
#define GL_SCREEN_NV 0x9295
#define GL_SOFTLIGHT_NV 0x929C
#define GL_SRC_ATOP_NV 0x928E
#define GL_SRC_IN_NV 0x928A
#define GL_SRC_NV 0x9286
#define GL_SRC_OUT_NV 0x928C
#define GL_SRC_OVER_NV 0x9288
#define GL_UNCORRELATED_NV 0x9282
#define GL_VIVIDLIGHT_NV 0x92A6
#define GL_XOR_NV 0x1506
typedef void (GL_APIENTRYP PFNGLBLENDPARAMETERINVPROC) (GLenum pname, GLint value);
typedef void (GL_APIENTRYP PFNGLBLENDBARRIERNVPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBlendParameteriNV (GLenum pname, GLint value);
GL_APICALL void GL_APIENTRY glBlendBarrierNV (void);
#endif
#endif /* GL_NV_blend_equation_advanced */
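/*
 * Usage sketch (not part of the Khronos header): selecting an advanced
 * blend mode. Unless GL_NV_blend_equation_advanced_coherent is enabled,
 * glBlendBarrierNV must separate draws whose pixels overlap. "n1"/"n2"
 * are hypothetical vertex counts.
 *
 *   glEnable(GL_BLEND);
 *   glBlendEquation(GL_MULTIPLY_NV);
 *   glDrawArrays(GL_TRIANGLES, 0, n1);
 *   glBlendBarrierNV();   // make the first draw's results visible
 *   glDrawArrays(GL_TRIANGLES, 0, n2);
 */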
#ifndef GL_NV_blend_equation_advanced_coherent
#define GL_NV_blend_equation_advanced_coherent 1
#define GL_BLEND_ADVANCED_COHERENT_NV 0x9285
#endif /* GL_NV_blend_equation_advanced_coherent */
#ifndef GL_NV_blend_minmax_factor
#define GL_NV_blend_minmax_factor 1
#define GL_FACTOR_MIN_AMD 0x901C
#define GL_FACTOR_MAX_AMD 0x901D
#endif /* GL_NV_blend_minmax_factor */
#ifndef GL_NV_clip_space_w_scaling
#define GL_NV_clip_space_w_scaling 1
#define GL_VIEWPORT_POSITION_W_SCALE_NV 0x937C
#define GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV 0x937D
#define GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV 0x937E
typedef void (GL_APIENTRYP PFNGLVIEWPORTPOSITIONWSCALENVPROC) (GLuint index, GLfloat xcoeff, GLfloat ycoeff);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glViewportPositionWScaleNV (GLuint index, GLfloat xcoeff, GLfloat ycoeff);
#endif
#endif /* GL_NV_clip_space_w_scaling */
#ifndef GL_NV_compute_shader_derivatives
#define GL_NV_compute_shader_derivatives 1
#endif /* GL_NV_compute_shader_derivatives */
#ifndef GL_NV_conditional_render
#define GL_NV_conditional_render 1
#define GL_QUERY_WAIT_NV 0x8E13
#define GL_QUERY_NO_WAIT_NV 0x8E14
#define GL_QUERY_BY_REGION_WAIT_NV 0x8E15
#define GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16
typedef void (GL_APIENTRYP PFNGLBEGINCONDITIONALRENDERNVPROC) (GLuint id, GLenum mode);
typedef void (GL_APIENTRYP PFNGLENDCONDITIONALRENDERNVPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBeginConditionalRenderNV (GLuint id, GLenum mode);
GL_APICALL void GL_APIENTRY glEndConditionalRenderNV (void);
#endif
#endif /* GL_NV_conditional_render */
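/*
 * Usage sketch (not part of the Khronos header): skipping draws when an
 * occlusion query returned zero samples. "query" is a hypothetical
 * occlusion query object; GL_QUERY_WAIT_NV blocks until its result is
 * available before deciding whether to render.
 *
 *   glBeginConditionalRenderNV(query, GL_QUERY_WAIT_NV);
 *   glDrawArrays(GL_TRIANGLES, 0, vertexCount);  // discarded if query == 0
 *   glEndConditionalRenderNV();
 */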
#ifndef GL_NV_conservative_raster
#define GL_NV_conservative_raster 1
#define GL_CONSERVATIVE_RASTERIZATION_NV 0x9346
#define GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV 0x9347
#define GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV 0x9348
#define GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV 0x9349
typedef void (GL_APIENTRYP PFNGLSUBPIXELPRECISIONBIASNVPROC) (GLuint xbits, GLuint ybits);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glSubpixelPrecisionBiasNV (GLuint xbits, GLuint ybits);
#endif
#endif /* GL_NV_conservative_raster */
#ifndef GL_NV_conservative_raster_pre_snap
#define GL_NV_conservative_raster_pre_snap 1
#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_NV 0x9550
#endif /* GL_NV_conservative_raster_pre_snap */
#ifndef GL_NV_conservative_raster_pre_snap_triangles
#define GL_NV_conservative_raster_pre_snap_triangles 1
#define GL_CONSERVATIVE_RASTER_MODE_NV 0x954D
#define GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV 0x954E
#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV 0x954F
typedef void (GL_APIENTRYP PFNGLCONSERVATIVERASTERPARAMETERINVPROC) (GLenum pname, GLint param);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glConservativeRasterParameteriNV (GLenum pname, GLint param);
#endif
#endif /* GL_NV_conservative_raster_pre_snap_triangles */
#ifndef GL_NV_copy_buffer
#define GL_NV_copy_buffer 1
#define GL_COPY_READ_BUFFER_NV 0x8F36
#define GL_COPY_WRITE_BUFFER_NV 0x8F37
typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATANVPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCopyBufferSubDataNV (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
#endif
#endif /* GL_NV_copy_buffer */
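/*
 * Usage sketch (not part of the Khronos header): a GPU-side copy between
 * two buffer objects without a round trip through client memory. "src",
 * "dst", and "byteCount" are hypothetical.
 *
 *   glBindBuffer(GL_COPY_READ_BUFFER_NV, src);
 *   glBindBuffer(GL_COPY_WRITE_BUFFER_NV, dst);
 *   glCopyBufferSubDataNV(GL_COPY_READ_BUFFER_NV, GL_COPY_WRITE_BUFFER_NV,
 *                         0, 0, byteCount);  // readOffset, writeOffset, size
 */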
#ifndef GL_NV_coverage_sample
#define GL_NV_coverage_sample 1
#define GL_COVERAGE_COMPONENT_NV 0x8ED0
#define GL_COVERAGE_COMPONENT4_NV 0x8ED1
#define GL_COVERAGE_ATTACHMENT_NV 0x8ED2
#define GL_COVERAGE_BUFFERS_NV 0x8ED3
#define GL_COVERAGE_SAMPLES_NV 0x8ED4
#define GL_COVERAGE_ALL_FRAGMENTS_NV 0x8ED5
#define GL_COVERAGE_EDGE_FRAGMENTS_NV 0x8ED6
#define GL_COVERAGE_AUTOMATIC_NV 0x8ED7
#define GL_COVERAGE_BUFFER_BIT_NV 0x00008000
typedef void (GL_APIENTRYP PFNGLCOVERAGEMASKNVPROC) (GLboolean mask);
typedef void (GL_APIENTRYP PFNGLCOVERAGEOPERATIONNVPROC) (GLenum operation);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCoverageMaskNV (GLboolean mask);
GL_APICALL void GL_APIENTRY glCoverageOperationNV (GLenum operation);
#endif
#endif /* GL_NV_coverage_sample */
#ifndef GL_NV_depth_nonlinear
#define GL_NV_depth_nonlinear 1
#define GL_DEPTH_COMPONENT16_NONLINEAR_NV 0x8E2C
#endif /* GL_NV_depth_nonlinear */
#ifndef GL_NV_draw_buffers
#define GL_NV_draw_buffers 1
#define GL_MAX_DRAW_BUFFERS_NV 0x8824
#define GL_DRAW_BUFFER0_NV 0x8825
#define GL_DRAW_BUFFER1_NV 0x8826
#define GL_DRAW_BUFFER2_NV 0x8827
#define GL_DRAW_BUFFER3_NV 0x8828
#define GL_DRAW_BUFFER4_NV 0x8829
#define GL_DRAW_BUFFER5_NV 0x882A
#define GL_DRAW_BUFFER6_NV 0x882B
#define GL_DRAW_BUFFER7_NV 0x882C
#define GL_DRAW_BUFFER8_NV 0x882D
#define GL_DRAW_BUFFER9_NV 0x882E
#define GL_DRAW_BUFFER10_NV 0x882F
#define GL_DRAW_BUFFER11_NV 0x8830
#define GL_DRAW_BUFFER12_NV 0x8831
#define GL_DRAW_BUFFER13_NV 0x8832
#define GL_DRAW_BUFFER14_NV 0x8833
#define GL_DRAW_BUFFER15_NV 0x8834
#define GL_COLOR_ATTACHMENT0_NV 0x8CE0
#define GL_COLOR_ATTACHMENT1_NV 0x8CE1
#define GL_COLOR_ATTACHMENT2_NV 0x8CE2
#define GL_COLOR_ATTACHMENT3_NV 0x8CE3
#define GL_COLOR_ATTACHMENT4_NV 0x8CE4
#define GL_COLOR_ATTACHMENT5_NV 0x8CE5
#define GL_COLOR_ATTACHMENT6_NV 0x8CE6
#define GL_COLOR_ATTACHMENT7_NV 0x8CE7
#define GL_COLOR_ATTACHMENT8_NV 0x8CE8
#define GL_COLOR_ATTACHMENT9_NV 0x8CE9
#define GL_COLOR_ATTACHMENT10_NV 0x8CEA
#define GL_COLOR_ATTACHMENT11_NV 0x8CEB
#define GL_COLOR_ATTACHMENT12_NV 0x8CEC
#define GL_COLOR_ATTACHMENT13_NV 0x8CED
#define GL_COLOR_ATTACHMENT14_NV 0x8CEE
#define GL_COLOR_ATTACHMENT15_NV 0x8CEF
typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSNVPROC) (GLsizei n, const GLenum *bufs);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawBuffersNV (GLsizei n, const GLenum *bufs);
#endif
#endif /* GL_NV_draw_buffers */
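/*
 * Usage sketch (not part of the Khronos header): directing fragment
 * outputs 0 and 1 to the first two color attachments of the currently
 * bound framebuffer object.
 *
 *   const GLenum bufs[] = { GL_COLOR_ATTACHMENT0_NV, GL_COLOR_ATTACHMENT1_NV };
 *   glDrawBuffersNV(2, bufs);
 */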
#ifndef GL_NV_draw_instanced
#define GL_NV_draw_instanced 1
typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDNVPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDNVPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawArraysInstancedNV (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedNV (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
#endif
#endif /* GL_NV_draw_instanced */
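/*
 * Usage sketch (not part of the Khronos header): drawing the same
 * indexed mesh 100 times in one call; combine with GL_NV_instanced_arrays
 * (further below) to vary attributes per instance. "indexCount" is
 * hypothetical, and the null pointer is an offset into the bound index
 * buffer.
 *
 *   glDrawElementsInstancedNV(GL_TRIANGLES, indexCount,
 *                             GL_UNSIGNED_SHORT, (const void *)0, 100);
 */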
#ifndef GL_NV_draw_vulkan_image
#define GL_NV_draw_vulkan_image 1
typedef void (GL_APIENTRY *GLVULKANPROCNV)(void);
typedef void (GL_APIENTRYP PFNGLDRAWVKIMAGENVPROC) (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
typedef GLVULKANPROCNV (GL_APIENTRYP PFNGLGETVKPROCADDRNVPROC) (const GLchar *name);
typedef void (GL_APIENTRYP PFNGLWAITVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
typedef void (GL_APIENTRYP PFNGLSIGNALVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
typedef void (GL_APIENTRYP PFNGLSIGNALVKFENCENVPROC) (GLuint64 vkFence);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawVkImageNV (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
GL_APICALL GLVULKANPROCNV GL_APIENTRY glGetVkProcAddrNV (const GLchar *name);
GL_APICALL void GL_APIENTRY glWaitVkSemaphoreNV (GLuint64 vkSemaphore);
GL_APICALL void GL_APIENTRY glSignalVkSemaphoreNV (GLuint64 vkSemaphore);
GL_APICALL void GL_APIENTRY glSignalVkFenceNV (GLuint64 vkFence);
#endif
#endif /* GL_NV_draw_vulkan_image */
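/*
 * Usage sketch (not part of the Khronos header): GL/Vulkan interop.
 * Wait on a Vulkan semaphore, draw the VkImage as a screen-space quad,
 * then signal a second semaphore back to Vulkan. The 64-bit Vulkan
 * handles and "sampler" are hypothetical values supplied by the app.
 *
 *   glWaitVkSemaphoreNV(vkReadySemaphore);
 *   glDrawVkImageNV(vkImage, sampler,
 *                   0.0f, 0.0f, 640.0f, 480.0f, 0.0f,   // x0, y0, x1, y1, z
 *                   0.0f, 1.0f, 1.0f, 0.0f);            // s0, t0, s1, t1
 *   glSignalVkSemaphoreNV(vkDoneSemaphore);
 */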
#ifndef GL_NV_explicit_attrib_location
#define GL_NV_explicit_attrib_location 1
#endif /* GL_NV_explicit_attrib_location */
#ifndef GL_NV_fbo_color_attachments
#define GL_NV_fbo_color_attachments 1
#define GL_MAX_COLOR_ATTACHMENTS_NV 0x8CDF
#endif /* GL_NV_fbo_color_attachments */
#ifndef GL_NV_fence
#define GL_NV_fence 1
#define GL_ALL_COMPLETED_NV 0x84F2
#define GL_FENCE_STATUS_NV 0x84F3
#define GL_FENCE_CONDITION_NV 0x84F4
typedef void (GL_APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences);
typedef void (GL_APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences);
typedef GLboolean (GL_APIENTRYP PFNGLISFENCENVPROC) (GLuint fence);
typedef GLboolean (GL_APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence);
typedef void (GL_APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence);
typedef void (GL_APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences);
GL_APICALL void GL_APIENTRY glGenFencesNV (GLsizei n, GLuint *fences);
GL_APICALL GLboolean GL_APIENTRY glIsFenceNV (GLuint fence);
GL_APICALL GLboolean GL_APIENTRY glTestFenceNV (GLuint fence);
GL_APICALL void GL_APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glFinishFenceNV (GLuint fence);
GL_APICALL void GL_APIENTRY glSetFenceNV (GLuint fence, GLenum condition);
#endif
#endif /* GL_NV_fence */
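/*
 * Usage sketch (not part of the Khronos header): inserting a fence after
 * a batch of commands and polling it instead of blocking in glFinish.
 *
 *   GLuint fence;
 *   glGenFencesNV(1, &fence);
 *   // ... issue rendering commands ...
 *   glSetFenceNV(fence, GL_ALL_COMPLETED_NV);
 *   while (!glTestFenceNV(fence)) {
 *       // do other CPU work; or call glFinishFenceNV(fence) to block
 *   }
 *   glDeleteFencesNV(1, &fence);
 */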
#ifndef GL_NV_fill_rectangle
#define GL_NV_fill_rectangle 1
#define GL_FILL_RECTANGLE_NV 0x933C
#endif /* GL_NV_fill_rectangle */
#ifndef GL_NV_fragment_coverage_to_color
#define GL_NV_fragment_coverage_to_color 1
#define GL_FRAGMENT_COVERAGE_TO_COLOR_NV 0x92DD
#define GL_FRAGMENT_COVERAGE_COLOR_NV 0x92DE
typedef void (GL_APIENTRYP PFNGLFRAGMENTCOVERAGECOLORNVPROC) (GLuint color);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFragmentCoverageColorNV (GLuint color);
#endif
#endif /* GL_NV_fragment_coverage_to_color */
#ifndef GL_NV_fragment_shader_barycentric
#define GL_NV_fragment_shader_barycentric 1
#endif /* GL_NV_fragment_shader_barycentric */
#ifndef GL_NV_fragment_shader_interlock
#define GL_NV_fragment_shader_interlock 1
#endif /* GL_NV_fragment_shader_interlock */
#ifndef GL_NV_framebuffer_blit
#define GL_NV_framebuffer_blit 1
#define GL_READ_FRAMEBUFFER_NV 0x8CA8
#define GL_DRAW_FRAMEBUFFER_NV 0x8CA9
#define GL_DRAW_FRAMEBUFFER_BINDING_NV 0x8CA6
#define GL_READ_FRAMEBUFFER_BINDING_NV 0x8CAA
typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERNVPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBlitFramebufferNV (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
#endif
#endif /* GL_NV_framebuffer_blit */
#ifndef GL_NV_framebuffer_mixed_samples
#define GL_NV_framebuffer_mixed_samples 1
#define GL_COVERAGE_MODULATION_TABLE_NV 0x9331
#define GL_COLOR_SAMPLES_NV 0x8E20
#define GL_DEPTH_SAMPLES_NV 0x932D
#define GL_STENCIL_SAMPLES_NV 0x932E
#define GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV 0x932F
#define GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV 0x9330
#define GL_COVERAGE_MODULATION_NV 0x9332
#define GL_COVERAGE_MODULATION_TABLE_SIZE_NV 0x9333
typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONTABLENVPROC) (GLsizei n, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLGETCOVERAGEMODULATIONTABLENVPROC) (GLsizei bufSize, GLfloat *v);
typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONNVPROC) (GLenum components);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCoverageModulationTableNV (GLsizei n, const GLfloat *v);
GL_APICALL void GL_APIENTRY glGetCoverageModulationTableNV (GLsizei bufSize, GLfloat *v);
GL_APICALL void GL_APIENTRY glCoverageModulationNV (GLenum components);
#endif
#endif /* GL_NV_framebuffer_mixed_samples */
#ifndef GL_NV_framebuffer_multisample
#define GL_NV_framebuffer_multisample 1
#define GL_RENDERBUFFER_SAMPLES_NV 0x8CAB
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_NV 0x8D56
#define GL_MAX_SAMPLES_NV 0x8D57
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLENVPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleNV (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
#endif
#endif /* GL_NV_framebuffer_multisample */
#ifndef GL_NV_generate_mipmap_sRGB
#define GL_NV_generate_mipmap_sRGB 1
#endif /* GL_NV_generate_mipmap_sRGB */
#ifndef GL_NV_geometry_shader_passthrough
#define GL_NV_geometry_shader_passthrough 1
#endif /* GL_NV_geometry_shader_passthrough */
#ifndef GL_NV_gpu_shader5
#define GL_NV_gpu_shader5 1
typedef khronos_int64_t GLint64EXT;
typedef khronos_uint64_t GLuint64EXT;
#define GL_INT64_NV 0x140E
#define GL_UNSIGNED_INT64_NV 0x140F
#define GL_INT8_NV 0x8FE0
#define GL_INT8_VEC2_NV 0x8FE1
#define GL_INT8_VEC3_NV 0x8FE2
#define GL_INT8_VEC4_NV 0x8FE3
#define GL_INT16_NV 0x8FE4
#define GL_INT16_VEC2_NV 0x8FE5
#define GL_INT16_VEC3_NV 0x8FE6
#define GL_INT16_VEC4_NV 0x8FE7
#define GL_INT64_VEC2_NV 0x8FE9
#define GL_INT64_VEC3_NV 0x8FEA
#define GL_INT64_VEC4_NV 0x8FEB
#define GL_UNSIGNED_INT8_NV 0x8FEC
#define GL_UNSIGNED_INT8_VEC2_NV 0x8FED
#define GL_UNSIGNED_INT8_VEC3_NV 0x8FEE
#define GL_UNSIGNED_INT8_VEC4_NV 0x8FEF
#define GL_UNSIGNED_INT16_NV 0x8FF0
#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1
#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2
#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3
#define GL_UNSIGNED_INT64_VEC2_NV 0x8FF5
#define GL_UNSIGNED_INT64_VEC3_NV 0x8FF6
#define GL_UNSIGNED_INT64_VEC4_NV 0x8FF7
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
#define GL_FLOAT16_VEC3_NV 0x8FFA
#define GL_FLOAT16_VEC4_NV 0x8FFB
#define GL_PATCHES 0x000E
typedef void (GL_APIENTRYP PFNGLUNIFORM1I64NVPROC) (GLint location, GLint64EXT x);
typedef void (GL_APIENTRYP PFNGLUNIFORM2I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y);
typedef void (GL_APIENTRYP PFNGLUNIFORM3I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
typedef void (GL_APIENTRYP PFNGLUNIFORM4I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
typedef void (GL_APIENTRYP PFNGLUNIFORM1I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM2I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM3I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM4I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64NVPROC) (GLint location, GLuint64EXT x);
typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y);
typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLGETUNIFORMI64VNVPROC) (GLuint program, GLint location, GLint64EXT *params);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64NVPROC) (GLuint program, GLint location, GLint64EXT x);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glUniform1i64NV (GLint location, GLint64EXT x);
GL_APICALL void GL_APIENTRY glUniform2i64NV (GLint location, GLint64EXT x, GLint64EXT y);
GL_APICALL void GL_APIENTRY glUniform3i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
GL_APICALL void GL_APIENTRY glUniform4i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
GL_APICALL void GL_APIENTRY glUniform1i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform2i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform3i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform4i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform1ui64NV (GLint location, GLuint64EXT x);
GL_APICALL void GL_APIENTRY glUniform2ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y);
GL_APICALL void GL_APIENTRY glUniform3ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
GL_APICALL void GL_APIENTRY glUniform4ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
GL_APICALL void GL_APIENTRY glUniform1ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform2ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform3ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glUniform4ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glGetUniformi64vNV (GLuint program, GLint location, GLint64EXT *params);
GL_APICALL void GL_APIENTRY glProgramUniform1i64NV (GLuint program, GLint location, GLint64EXT x);
GL_APICALL void GL_APIENTRY glProgramUniform2i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
GL_APICALL void GL_APIENTRY glProgramUniform3i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
GL_APICALL void GL_APIENTRY glProgramUniform4i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
GL_APICALL void GL_APIENTRY glProgramUniform1i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform2i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform3i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform4i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform1ui64NV (GLuint program, GLint location, GLuint64EXT x);
GL_APICALL void GL_APIENTRY glProgramUniform2ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
GL_APICALL void GL_APIENTRY glProgramUniform3ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
GL_APICALL void GL_APIENTRY glProgramUniform4ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
GL_APICALL void GL_APIENTRY glProgramUniform1ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform2ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform3ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
GL_APICALL void GL_APIENTRY glProgramUniform4ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
#endif
#endif /* GL_NV_gpu_shader5 */
#ifndef GL_NV_image_formats
#define GL_NV_image_formats 1
#endif /* GL_NV_image_formats */
#ifndef GL_NV_instanced_arrays
#define GL_NV_instanced_arrays 1
#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_NV 0x88FE
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORNVPROC) (GLuint index, GLuint divisor);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glVertexAttribDivisorNV (GLuint index, GLuint divisor);
#endif
#endif /* GL_NV_instanced_arrays */
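/*
 * Usage sketch (not part of the Khronos header): advancing attribute 3
 * once per instance rather than once per vertex, for use with the
 * instanced draw calls of GL_NV_draw_instanced above.
 *
 *   glVertexAttribDivisorNV(3, 1);  // divisor 1 = step per instance
 */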
#ifndef GL_NV_internalformat_sample_query
#define GL_NV_internalformat_sample_query 1
#define GL_TEXTURE_2D_MULTISAMPLE 0x9100
#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102
#define GL_MULTISAMPLES_NV 0x9371
#define GL_SUPERSAMPLE_SCALE_X_NV 0x9372
#define GL_SUPERSAMPLE_SCALE_Y_NV 0x9373
#define GL_CONFORMANT_NV 0x9374
typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATSAMPLEIVNVPROC) (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei count, GLint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetInternalformatSampleivNV (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei count, GLint *params);
#endif
#endif /* GL_NV_internalformat_sample_query */
#ifndef GL_NV_memory_attachment
#define GL_NV_memory_attachment 1
#define GL_ATTACHED_MEMORY_OBJECT_NV 0x95A4
#define GL_ATTACHED_MEMORY_OFFSET_NV 0x95A5
#define GL_MEMORY_ATTACHABLE_ALIGNMENT_NV 0x95A6
#define GL_MEMORY_ATTACHABLE_SIZE_NV 0x95A7
#define GL_MEMORY_ATTACHABLE_NV 0x95A8
#define GL_DETACHED_MEMORY_INCARNATION_NV 0x95A9
#define GL_DETACHED_TEXTURES_NV 0x95AA
#define GL_DETACHED_BUFFERS_NV 0x95AB
#define GL_MAX_DETACHED_TEXTURES_NV 0x95AC
#define GL_MAX_DETACHED_BUFFERS_NV 0x95AD
typedef void (GL_APIENTRYP PFNGLGETMEMORYOBJECTDETACHEDRESOURCESUIVNVPROC) (GLuint memory, GLenum pname, GLint first, GLsizei count, GLuint *params);
typedef void (GL_APIENTRYP PFNGLRESETMEMORYOBJECTPARAMETERNVPROC) (GLuint memory, GLenum pname);
typedef void (GL_APIENTRYP PFNGLTEXATTACHMEMORYNVPROC) (GLenum target, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLBUFFERATTACHMEMORYNVPROC) (GLenum target, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLTEXTUREATTACHMEMORYNVPROC) (GLuint texture, GLuint memory, GLuint64 offset);
typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERATTACHMEMORYNVPROC) (GLuint buffer, GLuint memory, GLuint64 offset);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetMemoryObjectDetachedResourcesuivNV (GLuint memory, GLenum pname, GLint first, GLsizei count, GLuint *params);
GL_APICALL void GL_APIENTRY glResetMemoryObjectParameterNV (GLuint memory, GLenum pname);
GL_APICALL void GL_APIENTRY glTexAttachMemoryNV (GLenum target, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glBufferAttachMemoryNV (GLenum target, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glTextureAttachMemoryNV (GLuint texture, GLuint memory, GLuint64 offset);
GL_APICALL void GL_APIENTRY glNamedBufferAttachMemoryNV (GLuint buffer, GLuint memory, GLuint64 offset);
#endif
#endif /* GL_NV_memory_attachment */
#ifndef GL_NV_memory_object_sparse
#define GL_NV_memory_object_sparse 1
typedef void (GL_APIENTRYP PFNGLBUFFERPAGECOMMITMENTMEMNVPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLuint memory, GLuint64 memOffset, GLboolean commit);
typedef void (GL_APIENTRYP PFNGLTEXPAGECOMMITMENTMEMNVPROC) (GLenum target, GLint layer, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset, GLboolean commit);
typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERPAGECOMMITMENTMEMNVPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLuint memory, GLuint64 memOffset, GLboolean commit);
typedef void (GL_APIENTRYP PFNGLTEXTUREPAGECOMMITMENTMEMNVPROC) (GLuint texture, GLint layer, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset, GLboolean commit);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBufferPageCommitmentMemNV (GLenum target, GLintptr offset, GLsizeiptr size, GLuint memory, GLuint64 memOffset, GLboolean commit);
GL_APICALL void GL_APIENTRY glTexPageCommitmentMemNV (GLenum target, GLint layer, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset, GLboolean commit);
GL_APICALL void GL_APIENTRY glNamedBufferPageCommitmentMemNV (GLuint buffer, GLintptr offset, GLsizeiptr size, GLuint memory, GLuint64 memOffset, GLboolean commit);
GL_APICALL void GL_APIENTRY glTexturePageCommitmentMemNV (GLuint texture, GLint layer, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset, GLboolean commit);
#endif
#endif /* GL_NV_memory_object_sparse */
#ifndef GL_NV_mesh_shader
#define GL_NV_mesh_shader 1
#define GL_MESH_SHADER_NV 0x9559
#define GL_TASK_SHADER_NV 0x955A
#define GL_MAX_MESH_UNIFORM_BLOCKS_NV 0x8E60
#define GL_MAX_MESH_TEXTURE_IMAGE_UNITS_NV 0x8E61
#define GL_MAX_MESH_IMAGE_UNIFORMS_NV 0x8E62
#define GL_MAX_MESH_UNIFORM_COMPONENTS_NV 0x8E63
#define GL_MAX_MESH_ATOMIC_COUNTER_BUFFERS_NV 0x8E64
#define GL_MAX_MESH_ATOMIC_COUNTERS_NV 0x8E65
#define GL_MAX_MESH_SHADER_STORAGE_BLOCKS_NV 0x8E66
#define GL_MAX_COMBINED_MESH_UNIFORM_COMPONENTS_NV 0x8E67
#define GL_MAX_TASK_UNIFORM_BLOCKS_NV 0x8E68
#define GL_MAX_TASK_TEXTURE_IMAGE_UNITS_NV 0x8E69
#define GL_MAX_TASK_IMAGE_UNIFORMS_NV 0x8E6A
#define GL_MAX_TASK_UNIFORM_COMPONENTS_NV 0x8E6B
#define GL_MAX_TASK_ATOMIC_COUNTER_BUFFERS_NV 0x8E6C
#define GL_MAX_TASK_ATOMIC_COUNTERS_NV 0x8E6D
#define GL_MAX_TASK_SHADER_STORAGE_BLOCKS_NV 0x8E6E
#define GL_MAX_COMBINED_TASK_UNIFORM_COMPONENTS_NV 0x8E6F
#define GL_MAX_MESH_WORK_GROUP_INVOCATIONS_NV 0x95A2
#define GL_MAX_TASK_WORK_GROUP_INVOCATIONS_NV 0x95A3
#define GL_MAX_MESH_TOTAL_MEMORY_SIZE_NV 0x9536
#define GL_MAX_TASK_TOTAL_MEMORY_SIZE_NV 0x9537
#define GL_MAX_MESH_OUTPUT_VERTICES_NV 0x9538
#define GL_MAX_MESH_OUTPUT_PRIMITIVES_NV 0x9539
#define GL_MAX_TASK_OUTPUT_COUNT_NV 0x953A
#define GL_MAX_DRAW_MESH_TASKS_COUNT_NV 0x953D
#define GL_MAX_MESH_VIEWS_NV 0x9557
#define GL_MESH_OUTPUT_PER_VERTEX_GRANULARITY_NV 0x92DF
#define GL_MESH_OUTPUT_PER_PRIMITIVE_GRANULARITY_NV 0x9543
#define GL_MAX_MESH_WORK_GROUP_SIZE_NV 0x953B
#define GL_MAX_TASK_WORK_GROUP_SIZE_NV 0x953C
#define GL_MESH_WORK_GROUP_SIZE_NV 0x953E
#define GL_TASK_WORK_GROUP_SIZE_NV 0x953F
#define GL_MESH_VERTICES_OUT_NV 0x9579
#define GL_MESH_PRIMITIVES_OUT_NV 0x957A
#define GL_MESH_OUTPUT_TYPE_NV 0x957B
#define GL_UNIFORM_BLOCK_REFERENCED_BY_MESH_SHADER_NV 0x959C
#define GL_UNIFORM_BLOCK_REFERENCED_BY_TASK_SHADER_NV 0x959D
#define GL_REFERENCED_BY_MESH_SHADER_NV 0x95A0
#define GL_REFERENCED_BY_TASK_SHADER_NV 0x95A1
#define GL_MESH_SHADER_BIT_NV 0x00000040
#define GL_TASK_SHADER_BIT_NV 0x00000080
#define GL_MESH_SUBROUTINE_NV 0x957C
#define GL_TASK_SUBROUTINE_NV 0x957D
#define GL_MESH_SUBROUTINE_UNIFORM_NV 0x957E
#define GL_TASK_SUBROUTINE_UNIFORM_NV 0x957F
#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_MESH_SHADER_NV 0x959E
#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TASK_SHADER_NV 0x959F
typedef void (GL_APIENTRYP PFNGLDRAWMESHTASKSNVPROC) (GLuint first, GLuint count);
typedef void (GL_APIENTRYP PFNGLDRAWMESHTASKSINDIRECTNVPROC) (GLintptr indirect);
typedef void (GL_APIENTRYP PFNGLMULTIDRAWMESHTASKSINDIRECTNVPROC) (GLintptr indirect, GLsizei drawcount, GLsizei stride);
typedef void (GL_APIENTRYP PFNGLMULTIDRAWMESHTASKSINDIRECTCOUNTNVPROC) (GLintptr indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glDrawMeshTasksNV (GLuint first, GLuint count);
GL_APICALL void GL_APIENTRY glDrawMeshTasksIndirectNV (GLintptr indirect);
GL_APICALL void GL_APIENTRY glMultiDrawMeshTasksIndirectNV (GLintptr indirect, GLsizei drawcount, GLsizei stride);
GL_APICALL void GL_APIENTRY glMultiDrawMeshTasksIndirectCountNV (GLintptr indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
#endif
#endif /* GL_NV_mesh_shader */
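/*
 * Usage sketch (not part of the Khronos header): launching 64 task/mesh
 * work groups with the currently bound mesh-shader program; the count
 * must not exceed GL_MAX_DRAW_MESH_TASKS_COUNT_NV.
 *
 *   glDrawMeshTasksNV(0, 64);  // first = 0, count = 64
 */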
#ifndef GL_NV_non_square_matrices
#define GL_NV_non_square_matrices 1
#define GL_FLOAT_MAT2x3_NV 0x8B65
#define GL_FLOAT_MAT2x4_NV 0x8B66
#define GL_FLOAT_MAT3x2_NV 0x8B67
#define GL_FLOAT_MAT3x4_NV 0x8B68
#define GL_FLOAT_MAT4x2_NV 0x8B69
#define GL_FLOAT_MAT4x3_NV 0x8B6A
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glUniformMatrix2x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix3x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix2x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix4x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix3x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix4x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
#endif
#endif /* GL_NV_non_square_matrices */
#ifndef GL_NV_path_rendering
#define GL_NV_path_rendering 1
typedef double GLdouble;
#define GL_PATH_FORMAT_SVG_NV 0x9070
#define GL_PATH_FORMAT_PS_NV 0x9071
#define GL_STANDARD_FONT_NAME_NV 0x9072
#define GL_SYSTEM_FONT_NAME_NV 0x9073
#define GL_FILE_NAME_NV 0x9074
#define GL_PATH_STROKE_WIDTH_NV 0x9075
#define GL_PATH_END_CAPS_NV 0x9076
#define GL_PATH_INITIAL_END_CAP_NV 0x9077
#define GL_PATH_TERMINAL_END_CAP_NV 0x9078
#define GL_PATH_JOIN_STYLE_NV 0x9079
#define GL_PATH_MITER_LIMIT_NV 0x907A
#define GL_PATH_DASH_CAPS_NV 0x907B
#define GL_PATH_INITIAL_DASH_CAP_NV 0x907C
#define GL_PATH_TERMINAL_DASH_CAP_NV 0x907D
#define GL_PATH_DASH_OFFSET_NV 0x907E
#define GL_PATH_CLIENT_LENGTH_NV 0x907F
#define GL_PATH_FILL_MODE_NV 0x9080
#define GL_PATH_FILL_MASK_NV 0x9081
#define GL_PATH_FILL_COVER_MODE_NV 0x9082
#define GL_PATH_STROKE_COVER_MODE_NV 0x9083
#define GL_PATH_STROKE_MASK_NV 0x9084
#define GL_COUNT_UP_NV 0x9088
#define GL_COUNT_DOWN_NV 0x9089
#define GL_PATH_OBJECT_BOUNDING_BOX_NV 0x908A
#define GL_CONVEX_HULL_NV 0x908B
#define GL_BOUNDING_BOX_NV 0x908D
#define GL_TRANSLATE_X_NV 0x908E
#define GL_TRANSLATE_Y_NV 0x908F
#define GL_TRANSLATE_2D_NV 0x9090
#define GL_TRANSLATE_3D_NV 0x9091
#define GL_AFFINE_2D_NV 0x9092
#define GL_AFFINE_3D_NV 0x9094
#define GL_TRANSPOSE_AFFINE_2D_NV 0x9096
#define GL_TRANSPOSE_AFFINE_3D_NV 0x9098
#define GL_UTF8_NV 0x909A
#define GL_UTF16_NV 0x909B
#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV 0x909C
#define GL_PATH_COMMAND_COUNT_NV 0x909D
#define GL_PATH_COORD_COUNT_NV 0x909E
#define GL_PATH_DASH_ARRAY_COUNT_NV 0x909F
#define GL_PATH_COMPUTED_LENGTH_NV 0x90A0
#define GL_PATH_FILL_BOUNDING_BOX_NV 0x90A1
#define GL_PATH_STROKE_BOUNDING_BOX_NV 0x90A2
#define GL_SQUARE_NV 0x90A3
#define GL_ROUND_NV 0x90A4
#define GL_TRIANGULAR_NV 0x90A5
#define GL_BEVEL_NV 0x90A6
#define GL_MITER_REVERT_NV 0x90A7
#define GL_MITER_TRUNCATE_NV 0x90A8
#define GL_SKIP_MISSING_GLYPH_NV 0x90A9
#define GL_USE_MISSING_GLYPH_NV 0x90AA
#define GL_PATH_ERROR_POSITION_NV 0x90AB
#define GL_ACCUM_ADJACENT_PAIRS_NV 0x90AD
#define GL_ADJACENT_PAIRS_NV 0x90AE
#define GL_FIRST_TO_REST_NV 0x90AF
#define GL_PATH_GEN_MODE_NV 0x90B0
#define GL_PATH_GEN_COEFF_NV 0x90B1
#define GL_PATH_GEN_COMPONENTS_NV 0x90B3
#define GL_PATH_STENCIL_FUNC_NV 0x90B7
#define GL_PATH_STENCIL_REF_NV 0x90B8
#define GL_PATH_STENCIL_VALUE_MASK_NV 0x90B9
#define GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV 0x90BD
#define GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV 0x90BE
#define GL_PATH_COVER_DEPTH_FUNC_NV 0x90BF
#define GL_PATH_DASH_OFFSET_RESET_NV 0x90B4
#define GL_MOVE_TO_RESETS_NV 0x90B5
#define GL_MOVE_TO_CONTINUES_NV 0x90B6
#define GL_CLOSE_PATH_NV 0x00
#define GL_MOVE_TO_NV 0x02
#define GL_RELATIVE_MOVE_TO_NV 0x03
#define GL_LINE_TO_NV 0x04
#define GL_RELATIVE_LINE_TO_NV 0x05
#define GL_HORIZONTAL_LINE_TO_NV 0x06
#define GL_RELATIVE_HORIZONTAL_LINE_TO_NV 0x07
#define GL_VERTICAL_LINE_TO_NV 0x08
#define GL_RELATIVE_VERTICAL_LINE_TO_NV 0x09
#define GL_QUADRATIC_CURVE_TO_NV 0x0A
#define GL_RELATIVE_QUADRATIC_CURVE_TO_NV 0x0B
#define GL_CUBIC_CURVE_TO_NV 0x0C
#define GL_RELATIVE_CUBIC_CURVE_TO_NV 0x0D
#define GL_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0E
#define GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0F
#define GL_SMOOTH_CUBIC_CURVE_TO_NV 0x10
#define GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV 0x11
#define GL_SMALL_CCW_ARC_TO_NV 0x12
#define GL_RELATIVE_SMALL_CCW_ARC_TO_NV 0x13
#define GL_SMALL_CW_ARC_TO_NV 0x14
#define GL_RELATIVE_SMALL_CW_ARC_TO_NV 0x15
#define GL_LARGE_CCW_ARC_TO_NV 0x16
#define GL_RELATIVE_LARGE_CCW_ARC_TO_NV 0x17
#define GL_LARGE_CW_ARC_TO_NV 0x18
#define GL_RELATIVE_LARGE_CW_ARC_TO_NV 0x19
#define GL_RESTART_PATH_NV 0xF0
#define GL_DUP_FIRST_CUBIC_CURVE_TO_NV 0xF2
#define GL_DUP_LAST_CUBIC_CURVE_TO_NV 0xF4
#define GL_RECT_NV 0xF6
#define GL_CIRCULAR_CCW_ARC_TO_NV 0xF8
#define GL_CIRCULAR_CW_ARC_TO_NV 0xFA
#define GL_CIRCULAR_TANGENT_ARC_TO_NV 0xFC
#define GL_ARC_TO_NV 0xFE
#define GL_RELATIVE_ARC_TO_NV 0xFF
#define GL_BOLD_BIT_NV 0x01
#define GL_ITALIC_BIT_NV 0x02
#define GL_GLYPH_WIDTH_BIT_NV 0x01
#define GL_GLYPH_HEIGHT_BIT_NV 0x02
#define GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV 0x04
#define GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV 0x08
#define GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV 0x10
#define GL_GLYPH_VERTICAL_BEARING_X_BIT_NV 0x20
#define GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV 0x40
#define GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV 0x80
#define GL_GLYPH_HAS_KERNING_BIT_NV 0x100
#define GL_FONT_X_MIN_BOUNDS_BIT_NV 0x00010000
#define GL_FONT_Y_MIN_BOUNDS_BIT_NV 0x00020000
#define GL_FONT_X_MAX_BOUNDS_BIT_NV 0x00040000
#define GL_FONT_Y_MAX_BOUNDS_BIT_NV 0x00080000
#define GL_FONT_UNITS_PER_EM_BIT_NV 0x00100000
#define GL_FONT_ASCENDER_BIT_NV 0x00200000
#define GL_FONT_DESCENDER_BIT_NV 0x00400000
#define GL_FONT_HEIGHT_BIT_NV 0x00800000
#define GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV 0x01000000
#define GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV 0x02000000
#define GL_FONT_UNDERLINE_POSITION_BIT_NV 0x04000000
#define GL_FONT_UNDERLINE_THICKNESS_BIT_NV 0x08000000
#define GL_FONT_HAS_KERNING_BIT_NV 0x10000000
#define GL_ROUNDED_RECT_NV 0xE8
#define GL_RELATIVE_ROUNDED_RECT_NV 0xE9
#define GL_ROUNDED_RECT2_NV 0xEA
#define GL_RELATIVE_ROUNDED_RECT2_NV 0xEB
#define GL_ROUNDED_RECT4_NV 0xEC
#define GL_RELATIVE_ROUNDED_RECT4_NV 0xED
#define GL_ROUNDED_RECT8_NV 0xEE
#define GL_RELATIVE_ROUNDED_RECT8_NV 0xEF
#define GL_RELATIVE_RECT_NV 0xF7
#define GL_FONT_GLYPHS_AVAILABLE_NV 0x9368
#define GL_FONT_TARGET_UNAVAILABLE_NV 0x9369
#define GL_FONT_UNAVAILABLE_NV 0x936A
#define GL_FONT_UNINTELLIGIBLE_NV 0x936B
#define GL_CONIC_CURVE_TO_NV 0x1A
#define GL_RELATIVE_CONIC_CURVE_TO_NV 0x1B
#define GL_FONT_NUM_GLYPH_INDICES_BIT_NV 0x20000000
#define GL_STANDARD_FONT_FORMAT_NV 0x936C
#define GL_PATH_PROJECTION_NV 0x1701
#define GL_PATH_MODELVIEW_NV 0x1700
#define GL_PATH_MODELVIEW_STACK_DEPTH_NV 0x0BA3
#define GL_PATH_MODELVIEW_MATRIX_NV 0x0BA6
#define GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV 0x0D36
#define GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV 0x84E3
#define GL_PATH_PROJECTION_STACK_DEPTH_NV 0x0BA4
#define GL_PATH_PROJECTION_MATRIX_NV 0x0BA7
#define GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV 0x0D38
#define GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV 0x84E4
#define GL_FRAGMENT_INPUT_NV 0x936D
typedef GLuint (GL_APIENTRYP PFNGLGENPATHSNVPROC) (GLsizei range);
typedef void (GL_APIENTRYP PFNGLDELETEPATHSNVPROC) (GLuint path, GLsizei range);
typedef GLboolean (GL_APIENTRYP PFNGLISPATHNVPROC) (GLuint path);
typedef void (GL_APIENTRYP PFNGLPATHCOMMANDSNVPROC) (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
typedef void (GL_APIENTRYP PFNGLPATHCOORDSNVPROC) (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
typedef void (GL_APIENTRYP PFNGLPATHSUBCOMMANDSNVPROC) (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
typedef void (GL_APIENTRYP PFNGLPATHSUBCOORDSNVPROC) (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
typedef void (GL_APIENTRYP PFNGLPATHSTRINGNVPROC) (GLuint path, GLenum format, GLsizei length, const void *pathString);
typedef void (GL_APIENTRYP PFNGLPATHGLYPHSNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
typedef void (GL_APIENTRYP PFNGLPATHGLYPHRANGENVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
typedef void (GL_APIENTRYP PFNGLWEIGHTPATHSNVPROC) (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
typedef void (GL_APIENTRYP PFNGLCOPYPATHNVPROC) (GLuint resultPath, GLuint srcPath);
typedef void (GL_APIENTRYP PFNGLINTERPOLATEPATHSNVPROC) (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
typedef void (GL_APIENTRYP PFNGLTRANSFORMPATHNVPROC) (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, const GLint *value);
typedef void (GL_APIENTRYP PFNGLPATHPARAMETERINVPROC) (GLuint path, GLenum pname, GLint value);
typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFNVPROC) (GLuint path, GLenum pname, GLfloat value);
typedef void (GL_APIENTRYP PFNGLPATHDASHARRAYNVPROC) (GLuint path, GLsizei dashCount, const GLfloat *dashArray);
typedef void (GL_APIENTRYP PFNGLPATHSTENCILFUNCNVPROC) (GLenum func, GLint ref, GLuint mask);
typedef void (GL_APIENTRYP PFNGLPATHSTENCILDEPTHOFFSETNVPROC) (GLfloat factor, GLfloat units);
typedef void (GL_APIENTRYP PFNGLSTENCILFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLPATHCOVERDEPTHFUNCNVPROC) (GLenum func);
typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHNVPROC) (GLuint path, GLenum coverMode);
typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHNVPROC) (GLuint path, GLenum coverMode);
typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, GLint *value);
typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, GLfloat *value);
typedef void (GL_APIENTRYP PFNGLGETPATHCOMMANDSNVPROC) (GLuint path, GLubyte *commands);
typedef void (GL_APIENTRYP PFNGLGETPATHCOORDSNVPROC) (GLuint path, GLfloat *coords);
typedef void (GL_APIENTRYP PFNGLGETPATHDASHARRAYNVPROC) (GLuint path, GLfloat *dashArray);
typedef void (GL_APIENTRYP PFNGLGETPATHMETRICSNVPROC) (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
typedef void (GL_APIENTRYP PFNGLGETPATHMETRICRANGENVPROC) (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
typedef void (GL_APIENTRYP PFNGLGETPATHSPACINGNVPROC) (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINFILLPATHNVPROC) (GLuint path, GLuint mask, GLfloat x, GLfloat y);
typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINSTROKEPATHNVPROC) (GLuint path, GLfloat x, GLfloat y);
typedef GLfloat (GL_APIENTRYP PFNGLGETPATHLENGTHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments);
typedef GLboolean (GL_APIENTRYP PFNGLPOINTALONGPATHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULTTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask, GLenum coverMode);
typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXRANGENVPROC) (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint *baseAndCount);
typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
typedef GLenum (GL_APIENTRYP PFNGLPATHMEMORYGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
typedef void (GL_APIENTRYP PFNGLPROGRAMPATHFRAGMENTINPUTGENNVPROC) (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEFVNVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei count, GLsizei *length, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLMATRIXFRUSTUMEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADIDENTITYEXTPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADFEXTPROC) (GLenum mode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXLOADDEXTPROC) (GLenum mode, const GLdouble *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULTTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULTTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULTFEXTPROC) (GLenum mode, const GLfloat *m);
typedef void (GL_APIENTRYP PFNGLMATRIXMULTDEXTPROC) (GLenum mode, const GLdouble *m);
typedef void (GL_APIENTRYP PFNGLMATRIXORTHOEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
typedef void (GL_APIENTRYP PFNGLMATRIXPOPEXTPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLMATRIXPUSHEXTPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLMATRIXROTATEFEXTPROC) (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
typedef void (GL_APIENTRYP PFNGLMATRIXROTATEDEXTPROC) (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
typedef void (GL_APIENTRYP PFNGLMATRIXSCALEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
typedef void (GL_APIENTRYP PFNGLMATRIXSCALEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
typedef void (GL_APIENTRYP PFNGLMATRIXTRANSLATEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
typedef void (GL_APIENTRYP PFNGLMATRIXTRANSLATEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL GLuint GL_APIENTRY glGenPathsNV (GLsizei range);
GL_APICALL void GL_APIENTRY glDeletePathsNV (GLuint path, GLsizei range);
GL_APICALL GLboolean GL_APIENTRY glIsPathNV (GLuint path);
GL_APICALL void GL_APIENTRY glPathCommandsNV (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
GL_APICALL void GL_APIENTRY glPathCoordsNV (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
GL_APICALL void GL_APIENTRY glPathSubCommandsNV (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
GL_APICALL void GL_APIENTRY glPathSubCoordsNV (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
GL_APICALL void GL_APIENTRY glPathStringNV (GLuint path, GLenum format, GLsizei length, const void *pathString);
GL_APICALL void GL_APIENTRY glPathGlyphsNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
GL_APICALL void GL_APIENTRY glPathGlyphRangeNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
GL_APICALL void GL_APIENTRY glWeightPathsNV (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
GL_APICALL void GL_APIENTRY glCopyPathNV (GLuint resultPath, GLuint srcPath);
GL_APICALL void GL_APIENTRY glInterpolatePathsNV (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
GL_APICALL void GL_APIENTRY glTransformPathNV (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glPathParameterivNV (GLuint path, GLenum pname, const GLint *value);
GL_APICALL void GL_APIENTRY glPathParameteriNV (GLuint path, GLenum pname, GLint value);
GL_APICALL void GL_APIENTRY glPathParameterfvNV (GLuint path, GLenum pname, const GLfloat *value);
GL_APICALL void GL_APIENTRY glPathParameterfNV (GLuint path, GLenum pname, GLfloat value);
GL_APICALL void GL_APIENTRY glPathDashArrayNV (GLuint path, GLsizei dashCount, const GLfloat *dashArray);
GL_APICALL void GL_APIENTRY glPathStencilFuncNV (GLenum func, GLint ref, GLuint mask);
GL_APICALL void GL_APIENTRY glPathStencilDepthOffsetNV (GLfloat factor, GLfloat units);
GL_APICALL void GL_APIENTRY glStencilFillPathNV (GLuint path, GLenum fillMode, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilStrokePathNV (GLuint path, GLint reference, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glStencilStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glPathCoverDepthFuncNV (GLenum func);
GL_APICALL void GL_APIENTRY glCoverFillPathNV (GLuint path, GLenum coverMode);
GL_APICALL void GL_APIENTRY glCoverStrokePathNV (GLuint path, GLenum coverMode);
GL_APICALL void GL_APIENTRY glCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glGetPathParameterivNV (GLuint path, GLenum pname, GLint *value);
GL_APICALL void GL_APIENTRY glGetPathParameterfvNV (GLuint path, GLenum pname, GLfloat *value);
GL_APICALL void GL_APIENTRY glGetPathCommandsNV (GLuint path, GLubyte *commands);
GL_APICALL void GL_APIENTRY glGetPathCoordsNV (GLuint path, GLfloat *coords);
GL_APICALL void GL_APIENTRY glGetPathDashArrayNV (GLuint path, GLfloat *dashArray);
GL_APICALL void GL_APIENTRY glGetPathMetricsNV (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
GL_APICALL void GL_APIENTRY glGetPathMetricRangeNV (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
GL_APICALL void GL_APIENTRY glGetPathSpacingNV (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
GL_APICALL GLboolean GL_APIENTRY glIsPointInFillPathNV (GLuint path, GLuint mask, GLfloat x, GLfloat y);
GL_APICALL GLboolean GL_APIENTRY glIsPointInStrokePathNV (GLuint path, GLfloat x, GLfloat y);
GL_APICALL GLfloat GL_APIENTRY glGetPathLengthNV (GLuint path, GLsizei startSegment, GLsizei numSegments);
GL_APICALL GLboolean GL_APIENTRY glPointAlongPathNV (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
GL_APICALL void GL_APIENTRY glMatrixLoad3x2fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixLoad3x3fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixLoadTranspose3x3fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixMult3x2fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixMult3x3fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixMultTranspose3x3fNV (GLenum matrixMode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathNV (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathNV (GLuint path, GLint reference, GLuint mask, GLenum coverMode);
GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexRangeNV (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint *baseAndCount);
GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
GL_APICALL GLenum GL_APIENTRY glPathMemoryGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
GL_APICALL void GL_APIENTRY glProgramPathFragmentInputGenNV (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
GL_APICALL void GL_APIENTRY glGetProgramResourcefvNV (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei count, GLsizei *length, GLfloat *params);
GL_APICALL void GL_APIENTRY glMatrixFrustumEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
GL_APICALL void GL_APIENTRY glMatrixLoadIdentityEXT (GLenum mode);
GL_APICALL void GL_APIENTRY glMatrixLoadTransposefEXT (GLenum mode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixLoadTransposedEXT (GLenum mode, const GLdouble *m);
GL_APICALL void GL_APIENTRY glMatrixLoadfEXT (GLenum mode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixLoaddEXT (GLenum mode, const GLdouble *m);
GL_APICALL void GL_APIENTRY glMatrixMultTransposefEXT (GLenum mode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixMultTransposedEXT (GLenum mode, const GLdouble *m);
GL_APICALL void GL_APIENTRY glMatrixMultfEXT (GLenum mode, const GLfloat *m);
GL_APICALL void GL_APIENTRY glMatrixMultdEXT (GLenum mode, const GLdouble *m);
GL_APICALL void GL_APIENTRY glMatrixOrthoEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
GL_APICALL void GL_APIENTRY glMatrixPopEXT (GLenum mode);
GL_APICALL void GL_APIENTRY glMatrixPushEXT (GLenum mode);
GL_APICALL void GL_APIENTRY glMatrixRotatefEXT (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
GL_APICALL void GL_APIENTRY glMatrixRotatedEXT (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
GL_APICALL void GL_APIENTRY glMatrixScalefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
GL_APICALL void GL_APIENTRY glMatrixScaledEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
GL_APICALL void GL_APIENTRY glMatrixTranslatefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
GL_APICALL void GL_APIENTRY glMatrixTranslatedEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
#endif
#endif /* GL_NV_path_rendering */
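/*
 * Usage sketch (illustrative only, not part of the Khronos header): on
 * OpenGL ES, extension entry points such as the NV_path_rendering functions
 * declared above are not exported directly; they are loaded at run time
 * through the PFNGL*PROC typedefs, typically via eglGetProcAddress(). A
 * minimal example, assuming a current EGL context whose extension string
 * advertises GL_NV_path_rendering, and using the GL_COUNT_UP_NV and
 * GL_BOUNDING_BOX_NV tokens defined earlier in this extension block:
 *
 * \code
 * #include <EGL/egl.h>
 *
 * PFNGLSTENCILTHENCOVERFILLPATHNVPROC pglStencilThenCoverFillPathNV =
 *     (PFNGLSTENCILTHENCOVERFILLPATHNVPROC)
 *         eglGetProcAddress("glStencilThenCoverFillPathNV");
 * if (pglStencilThenCoverFillPathNV != NULL) {
 *     // Stencil, then cover, the fill of path object 42 in one call,
 *     // using a count-up fill rule and a bounding-box cover.
 *     pglStencilThenCoverFillPathNV(42, GL_COUNT_UP_NV, 0xFF,
 *                                   GL_BOUNDING_BOX_NV);
 * }
 * \endcode
 */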
#ifndef GL_NV_path_rendering_shared_edge
#define GL_NV_path_rendering_shared_edge 1
#define GL_SHARED_EDGE_NV 0xC0
#endif /* GL_NV_path_rendering_shared_edge */
#ifndef GL_NV_pixel_buffer_object
#define GL_NV_pixel_buffer_object 1
#define GL_PIXEL_PACK_BUFFER_NV 0x88EB
#define GL_PIXEL_UNPACK_BUFFER_NV 0x88EC
#define GL_PIXEL_PACK_BUFFER_BINDING_NV 0x88ED
#define GL_PIXEL_UNPACK_BUFFER_BINDING_NV 0x88EF
#endif /* GL_NV_pixel_buffer_object */
#ifndef GL_NV_polygon_mode
#define GL_NV_polygon_mode 1
#define GL_POLYGON_MODE_NV 0x0B40
#define GL_POLYGON_OFFSET_POINT_NV 0x2A01
#define GL_POLYGON_OFFSET_LINE_NV 0x2A02
#define GL_POINT_NV 0x1B00
#define GL_LINE_NV 0x1B01
#define GL_FILL_NV 0x1B02
typedef void (GL_APIENTRYP PFNGLPOLYGONMODENVPROC) (GLenum face, GLenum mode);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glPolygonModeNV (GLenum face, GLenum mode);
#endif
#endif /* GL_NV_polygon_mode */
#ifndef GL_NV_primitive_shading_rate
#define GL_NV_primitive_shading_rate 1
#define GL_SHADING_RATE_IMAGE_PER_PRIMITIVE_NV 0x95B1
#define GL_SHADING_RATE_IMAGE_PALETTE_COUNT_NV 0x95B2
#endif /* GL_NV_primitive_shading_rate */
#ifndef GL_NV_read_buffer
#define GL_NV_read_buffer 1
#define GL_READ_BUFFER_NV 0x0C02
typedef void (GL_APIENTRYP PFNGLREADBUFFERNVPROC) (GLenum mode);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glReadBufferNV (GLenum mode);
#endif
#endif /* GL_NV_read_buffer */
#ifndef GL_NV_read_buffer_front
#define GL_NV_read_buffer_front 1
#endif /* GL_NV_read_buffer_front */
#ifndef GL_NV_read_depth
#define GL_NV_read_depth 1
#endif /* GL_NV_read_depth */
#ifndef GL_NV_read_depth_stencil
#define GL_NV_read_depth_stencil 1
#endif /* GL_NV_read_depth_stencil */
#ifndef GL_NV_read_stencil
#define GL_NV_read_stencil 1
#endif /* GL_NV_read_stencil */
#ifndef GL_NV_representative_fragment_test
#define GL_NV_representative_fragment_test 1
#define GL_REPRESENTATIVE_FRAGMENT_TEST_NV 0x937F
#endif /* GL_NV_representative_fragment_test */
#ifndef GL_NV_sRGB_formats
#define GL_NV_sRGB_formats 1
#define GL_SLUMINANCE_NV 0x8C46
#define GL_SLUMINANCE_ALPHA_NV 0x8C44
#define GL_SRGB8_NV 0x8C41
#define GL_SLUMINANCE8_NV 0x8C47
#define GL_SLUMINANCE8_ALPHA8_NV 0x8C45
#define GL_COMPRESSED_SRGB_S3TC_DXT1_NV 0x8C4C
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV 0x8C4D
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV 0x8C4E
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV 0x8C4F
#define GL_ETC1_SRGB8_NV 0x88EE
#endif /* GL_NV_sRGB_formats */
#ifndef GL_NV_sample_locations
#define GL_NV_sample_locations 1
#define GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV 0x933D
#define GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV 0x933E
#define GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV 0x933F
#define GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV 0x9340
#define GL_SAMPLE_LOCATION_NV 0x8E50
#define GL_PROGRAMMABLE_SAMPLE_LOCATION_NV 0x9341
#define GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV 0x9342
#define GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV 0x9343
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLRESOLVEDEPTHVALUESNVPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferSampleLocationsfvNV (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glNamedFramebufferSampleLocationsfvNV (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glResolveDepthValuesNV (void);
#endif
#endif /* GL_NV_sample_locations */
#ifndef GL_NV_sample_mask_override_coverage
#define GL_NV_sample_mask_override_coverage 1
#endif /* GL_NV_sample_mask_override_coverage */
#ifndef GL_NV_scissor_exclusive
#define GL_NV_scissor_exclusive 1
#define GL_SCISSOR_TEST_EXCLUSIVE_NV 0x9555
#define GL_SCISSOR_BOX_EXCLUSIVE_NV 0x9556
typedef void (GL_APIENTRYP PFNGLSCISSOREXCLUSIVENVPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLSCISSOREXCLUSIVEARRAYVNVPROC) (GLuint first, GLsizei count, const GLint *v);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glScissorExclusiveNV (GLint x, GLint y, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glScissorExclusiveArrayvNV (GLuint first, GLsizei count, const GLint *v);
#endif
#endif /* GL_NV_scissor_exclusive */
#ifndef GL_NV_shader_atomic_fp16_vector
#define GL_NV_shader_atomic_fp16_vector 1
#endif /* GL_NV_shader_atomic_fp16_vector */
#ifndef GL_NV_shader_noperspective_interpolation
#define GL_NV_shader_noperspective_interpolation 1
#endif /* GL_NV_shader_noperspective_interpolation */
#ifndef GL_NV_shader_subgroup_partitioned
#define GL_NV_shader_subgroup_partitioned 1
#define GL_SUBGROUP_FEATURE_PARTITIONED_BIT_NV 0x00000100
#endif /* GL_NV_shader_subgroup_partitioned */
#ifndef GL_NV_shader_texture_footprint
#define GL_NV_shader_texture_footprint 1
#endif /* GL_NV_shader_texture_footprint */
#ifndef GL_NV_shading_rate_image
#define GL_NV_shading_rate_image 1
#define GL_SHADING_RATE_IMAGE_NV 0x9563
#define GL_SHADING_RATE_NO_INVOCATIONS_NV 0x9564
#define GL_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV 0x9565
#define GL_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV 0x9566
#define GL_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV 0x9567
#define GL_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV 0x9568
#define GL_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV 0x9569
#define GL_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV 0x956A
#define GL_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV 0x956B
#define GL_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV 0x956C
#define GL_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV 0x956D
#define GL_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV 0x956E
#define GL_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV 0x956F
#define GL_SHADING_RATE_IMAGE_BINDING_NV 0x955B
#define GL_SHADING_RATE_IMAGE_TEXEL_WIDTH_NV 0x955C
#define GL_SHADING_RATE_IMAGE_TEXEL_HEIGHT_NV 0x955D
#define GL_SHADING_RATE_IMAGE_PALETTE_SIZE_NV 0x955E
#define GL_MAX_COARSE_FRAGMENT_SAMPLES_NV 0x955F
#define GL_SHADING_RATE_SAMPLE_ORDER_DEFAULT_NV 0x95AE
#define GL_SHADING_RATE_SAMPLE_ORDER_PIXEL_MAJOR_NV 0x95AF
#define GL_SHADING_RATE_SAMPLE_ORDER_SAMPLE_MAJOR_NV 0x95B0
typedef void (GL_APIENTRYP PFNGLBINDSHADINGRATEIMAGENVPROC) (GLuint texture);
typedef void (GL_APIENTRYP PFNGLGETSHADINGRATEIMAGEPALETTENVPROC) (GLuint viewport, GLuint entry, GLenum *rate);
typedef void (GL_APIENTRYP PFNGLGETSHADINGRATESAMPLELOCATIONIVNVPROC) (GLenum rate, GLuint samples, GLuint index, GLint *location);
typedef void (GL_APIENTRYP PFNGLSHADINGRATEIMAGEBARRIERNVPROC) (GLboolean synchronize);
typedef void (GL_APIENTRYP PFNGLSHADINGRATEIMAGEPALETTENVPROC) (GLuint viewport, GLuint first, GLsizei count, const GLenum *rates);
typedef void (GL_APIENTRYP PFNGLSHADINGRATESAMPLEORDERNVPROC) (GLenum order);
typedef void (GL_APIENTRYP PFNGLSHADINGRATESAMPLEORDERCUSTOMNVPROC) (GLenum rate, GLuint samples, const GLint *locations);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glBindShadingRateImageNV (GLuint texture);
GL_APICALL void GL_APIENTRY glGetShadingRateImagePaletteNV (GLuint viewport, GLuint entry, GLenum *rate);
GL_APICALL void GL_APIENTRY glGetShadingRateSampleLocationivNV (GLenum rate, GLuint samples, GLuint index, GLint *location);
GL_APICALL void GL_APIENTRY glShadingRateImageBarrierNV (GLboolean synchronize);
GL_APICALL void GL_APIENTRY glShadingRateImagePaletteNV (GLuint viewport, GLuint first, GLsizei count, const GLenum *rates);
GL_APICALL void GL_APIENTRY glShadingRateSampleOrderNV (GLenum order);
GL_APICALL void GL_APIENTRY glShadingRateSampleOrderCustomNV (GLenum rate, GLuint samples, const GLint *locations);
#endif
#endif /* GL_NV_shading_rate_image */
#ifndef GL_NV_shadow_samplers_array
#define GL_NV_shadow_samplers_array 1
#define GL_SAMPLER_2D_ARRAY_SHADOW_NV 0x8DC4
#endif /* GL_NV_shadow_samplers_array */
#ifndef GL_NV_shadow_samplers_cube
#define GL_NV_shadow_samplers_cube 1
#define GL_SAMPLER_CUBE_SHADOW_NV 0x8DC5
#endif /* GL_NV_shadow_samplers_cube */
#ifndef GL_NV_stereo_view_rendering
#define GL_NV_stereo_view_rendering 1
#endif /* GL_NV_stereo_view_rendering */
#ifndef GL_NV_texture_border_clamp
#define GL_NV_texture_border_clamp 1
#define GL_TEXTURE_BORDER_COLOR_NV 0x1004
#define GL_CLAMP_TO_BORDER_NV 0x812D
#endif /* GL_NV_texture_border_clamp */
#ifndef GL_NV_texture_compression_s3tc_update
#define GL_NV_texture_compression_s3tc_update 1
#endif /* GL_NV_texture_compression_s3tc_update */
#ifndef GL_NV_texture_npot_2D_mipmap
#define GL_NV_texture_npot_2D_mipmap 1
#endif /* GL_NV_texture_npot_2D_mipmap */
#ifndef GL_NV_timeline_semaphore
#define GL_NV_timeline_semaphore 1
#define GL_TIMELINE_SEMAPHORE_VALUE_NV 0x9595
#define GL_SEMAPHORE_TYPE_NV 0x95B3
#define GL_SEMAPHORE_TYPE_BINARY_NV 0x95B4
#define GL_SEMAPHORE_TYPE_TIMELINE_NV 0x95B5
#define GL_MAX_TIMELINE_SEMAPHORE_VALUE_DIFFERENCE_NV 0x95B6
typedef void (GL_APIENTRYP PFNGLCREATESEMAPHORESNVPROC) (GLsizei n, GLuint *semaphores);
typedef void (GL_APIENTRYP PFNGLSEMAPHOREPARAMETERIVNVPROC) (GLuint semaphore, GLenum pname, const GLint *params);
typedef void (GL_APIENTRYP PFNGLGETSEMAPHOREPARAMETERIVNVPROC) (GLuint semaphore, GLenum pname, GLint *params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glCreateSemaphoresNV (GLsizei n, GLuint *semaphores);
GL_APICALL void GL_APIENTRY glSemaphoreParameterivNV (GLuint semaphore, GLenum pname, const GLint *params);
GL_APICALL void GL_APIENTRY glGetSemaphoreParameterivNV (GLuint semaphore, GLenum pname, GLint *params);
#endif
#endif /* GL_NV_timeline_semaphore */
#ifndef GL_NV_viewport_array
#define GL_NV_viewport_array 1
#define GL_MAX_VIEWPORTS_NV 0x825B
#define GL_VIEWPORT_SUBPIXEL_BITS_NV 0x825C
#define GL_VIEWPORT_BOUNDS_RANGE_NV 0x825D
#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV 0x825F
typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVNVPROC) (GLuint first, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVNVPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVNVPROC) (GLuint first, GLsizei count, const GLint *v);
typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDNVPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVNVPROC) (GLuint index, const GLint *v);
typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVNVPROC) (GLuint first, GLsizei count, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFNVPROC) (GLuint index, GLfloat n, GLfloat f);
typedef void (GL_APIENTRYP PFNGLGETFLOATI_VNVPROC) (GLenum target, GLuint index, GLfloat *data);
typedef void (GL_APIENTRYP PFNGLENABLEINVPROC) (GLenum target, GLuint index);
typedef void (GL_APIENTRYP PFNGLDISABLEINVPROC) (GLenum target, GLuint index);
typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDINVPROC) (GLenum target, GLuint index);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glViewportArrayvNV (GLuint first, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glViewportIndexedfNV (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
GL_APICALL void GL_APIENTRY glViewportIndexedfvNV (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glScissorArrayvNV (GLuint first, GLsizei count, const GLint *v);
GL_APICALL void GL_APIENTRY glScissorIndexedNV (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glScissorIndexedvNV (GLuint index, const GLint *v);
GL_APICALL void GL_APIENTRY glDepthRangeArrayfvNV (GLuint first, GLsizei count, const GLfloat *v);
GL_APICALL void GL_APIENTRY glDepthRangeIndexedfNV (GLuint index, GLfloat n, GLfloat f);
GL_APICALL void GL_APIENTRY glGetFloati_vNV (GLenum target, GLuint index, GLfloat *data);
GL_APICALL void GL_APIENTRY glEnableiNV (GLenum target, GLuint index);
GL_APICALL void GL_APIENTRY glDisableiNV (GLenum target, GLuint index);
GL_APICALL GLboolean GL_APIENTRY glIsEnablediNV (GLenum target, GLuint index);
#endif
#endif /* GL_NV_viewport_array */
#ifndef GL_NV_viewport_array2
#define GL_NV_viewport_array2 1
#endif /* GL_NV_viewport_array2 */
#ifndef GL_NV_viewport_swizzle
#define GL_NV_viewport_swizzle 1
#define GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV 0x9350
#define GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV 0x9351
#define GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV 0x9352
#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV 0x9353
#define GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV 0x9354
#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV 0x9355
#define GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV 0x9356
#define GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV 0x9357
#define GL_VIEWPORT_SWIZZLE_X_NV 0x9358
#define GL_VIEWPORT_SWIZZLE_Y_NV 0x9359
#define GL_VIEWPORT_SWIZZLE_Z_NV 0x935A
#define GL_VIEWPORT_SWIZZLE_W_NV 0x935B
typedef void (GL_APIENTRYP PFNGLVIEWPORTSWIZZLENVPROC) (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew);
#endif
#endif /* GL_NV_viewport_swizzle */
#ifndef GL_OVR_multiview
#define GL_OVR_multiview 1
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR 0x9630
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR 0x9632
#define GL_MAX_VIEWS_OVR 0x9631
#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
#endif
#endif /* GL_OVR_multiview */
#ifndef GL_OVR_multiview2
#define GL_OVR_multiview2 1
#endif /* GL_OVR_multiview2 */
#ifndef GL_OVR_multiview_multisampled_render_to_texture
#define GL_OVR_multiview_multisampled_render_to_texture 1
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTISAMPLEMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferTextureMultisampleMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews);
#endif
#endif /* GL_OVR_multiview_multisampled_render_to_texture */
#ifndef GL_QCOM_YUV_texture_gather
#define GL_QCOM_YUV_texture_gather 1
#endif /* GL_QCOM_YUV_texture_gather */
#ifndef GL_QCOM_alpha_test
#define GL_QCOM_alpha_test 1
#define GL_ALPHA_TEST_QCOM 0x0BC0
#define GL_ALPHA_TEST_FUNC_QCOM 0x0BC1
#define GL_ALPHA_TEST_REF_QCOM 0x0BC2
typedef void (GL_APIENTRYP PFNGLALPHAFUNCQCOMPROC) (GLenum func, GLclampf ref);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glAlphaFuncQCOM (GLenum func, GLclampf ref);
#endif
#endif /* GL_QCOM_alpha_test */
#ifndef GL_QCOM_binning_control
#define GL_QCOM_binning_control 1
#define GL_BINNING_CONTROL_HINT_QCOM 0x8FB0
#define GL_CPU_OPTIMIZED_QCOM 0x8FB1
#define GL_GPU_OPTIMIZED_QCOM 0x8FB2
#define GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM 0x8FB3
#endif /* GL_QCOM_binning_control */
#ifndef GL_QCOM_driver_control
#define GL_QCOM_driver_control 1
typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSQCOMPROC) (GLint *num, GLsizei size, GLuint *driverControls);
typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSTRINGQCOMPROC) (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString);
typedef void (GL_APIENTRYP PFNGLENABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl);
typedef void (GL_APIENTRYP PFNGLDISABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glGetDriverControlsQCOM (GLint *num, GLsizei size, GLuint *driverControls);
GL_APICALL void GL_APIENTRY glGetDriverControlStringQCOM (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString);
GL_APICALL void GL_APIENTRY glEnableDriverControlQCOM (GLuint driverControl);
GL_APICALL void GL_APIENTRY glDisableDriverControlQCOM (GLuint driverControl);
#endif
#endif /* GL_QCOM_driver_control */
#ifndef GL_QCOM_extended_get
#define GL_QCOM_extended_get 1
#define GL_TEXTURE_WIDTH_QCOM 0x8BD2
#define GL_TEXTURE_HEIGHT_QCOM 0x8BD3
#define GL_TEXTURE_DEPTH_QCOM 0x8BD4
#define GL_TEXTURE_INTERNAL_FORMAT_QCOM 0x8BD5
#define GL_TEXTURE_FORMAT_QCOM 0x8BD6
#define GL_TEXTURE_TYPE_QCOM 0x8BD7
#define GL_TEXTURE_IMAGE_VALID_QCOM 0x8BD8
#define GL_TEXTURE_NUM_LEVELS_QCOM 0x8BD9
#define GL_TEXTURE_TARGET_QCOM 0x8BDA
#define GL_TEXTURE_OBJECT_VALID_QCOM 0x8BDB
#define GL_STATE_RESTORE 0x8BDC
typedef void (GL_APIENTRYP PFNGLEXTGETTEXTURESQCOMPROC) (GLuint *textures, GLint maxTextures, GLint *numTextures);
typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERSQCOMPROC) (GLuint *buffers, GLint maxBuffers, GLint *numBuffers);
typedef void (GL_APIENTRYP PFNGLEXTGETRENDERBUFFERSQCOMPROC) (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers);
typedef void (GL_APIENTRYP PFNGLEXTGETFRAMEBUFFERSQCOMPROC) (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers);
typedef void (GL_APIENTRYP PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC) (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC) (GLenum target, GLenum pname, GLint param);
typedef void (GL_APIENTRYP PFNGLEXTGETTEXSUBIMAGEQCOMPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels);
typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERPOINTERVQCOMPROC) (GLenum target, void **params);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glExtGetTexturesQCOM (GLuint *textures, GLint maxTextures, GLint *numTextures);
GL_APICALL void GL_APIENTRY glExtGetBuffersQCOM (GLuint *buffers, GLint maxBuffers, GLint *numBuffers);
GL_APICALL void GL_APIENTRY glExtGetRenderbuffersQCOM (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers);
GL_APICALL void GL_APIENTRY glExtGetFramebuffersQCOM (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers);
GL_APICALL void GL_APIENTRY glExtGetTexLevelParameterivQCOM (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glExtTexObjectStateOverrideiQCOM (GLenum target, GLenum pname, GLint param);
GL_APICALL void GL_APIENTRY glExtGetTexSubImageQCOM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels);
GL_APICALL void GL_APIENTRY glExtGetBufferPointervQCOM (GLenum target, void **params);
#endif
#endif /* GL_QCOM_extended_get */
#ifndef GL_QCOM_extended_get2
#define GL_QCOM_extended_get2 1
typedef void (GL_APIENTRYP PFNGLEXTGETSHADERSQCOMPROC) (GLuint *shaders, GLint maxShaders, GLint *numShaders);
typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMSQCOMPROC) (GLuint *programs, GLint maxPrograms, GLint *numPrograms);
typedef GLboolean (GL_APIENTRYP PFNGLEXTISPROGRAMBINARYQCOMPROC) (GLuint program);
typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC) (GLuint program, GLenum shadertype, GLchar *source, GLint *length);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glExtGetShadersQCOM (GLuint *shaders, GLint maxShaders, GLint *numShaders);
GL_APICALL void GL_APIENTRY glExtGetProgramsQCOM (GLuint *programs, GLint maxPrograms, GLint *numPrograms);
GL_APICALL GLboolean GL_APIENTRY glExtIsProgramBinaryQCOM (GLuint program);
GL_APICALL void GL_APIENTRY glExtGetProgramBinarySourceQCOM (GLuint program, GLenum shadertype, GLchar *source, GLint *length);
#endif
#endif /* GL_QCOM_extended_get2 */
#ifndef GL_QCOM_frame_extrapolation
#define GL_QCOM_frame_extrapolation 1
typedef void (GL_APIENTRYP PFNGLEXTRAPOLATETEX2DQCOMPROC) (GLuint src1, GLuint src2, GLuint output, GLfloat scaleFactor);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glExtrapolateTex2DQCOM (GLuint src1, GLuint src2, GLuint output, GLfloat scaleFactor);
#endif
#endif /* GL_QCOM_frame_extrapolation */
#ifndef GL_QCOM_framebuffer_foveated
#define GL_QCOM_framebuffer_foveated 1
#define GL_FOVEATION_ENABLE_BIT_QCOM 0x00000001
#define GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM 0x00000002
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONCONFIGQCOMPROC) (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONPARAMETERSQCOMPROC) (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferFoveationConfigQCOM (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures);
GL_APICALL void GL_APIENTRY glFramebufferFoveationParametersQCOM (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea);
#endif
#endif /* GL_QCOM_framebuffer_foveated */
#ifndef GL_QCOM_motion_estimation
#define GL_QCOM_motion_estimation 1
#define GL_MOTION_ESTIMATION_SEARCH_BLOCK_X_QCOM 0x8C90
#define GL_MOTION_ESTIMATION_SEARCH_BLOCK_Y_QCOM 0x8C91
typedef void (GL_APIENTRYP PFNGLTEXESTIMATEMOTIONQCOMPROC) (GLuint ref, GLuint target, GLuint output);
typedef void (GL_APIENTRYP PFNGLTEXESTIMATEMOTIONREGIONSQCOMPROC) (GLuint ref, GLuint target, GLuint output, GLuint mask);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTexEstimateMotionQCOM (GLuint ref, GLuint target, GLuint output);
GL_APICALL void GL_APIENTRY glTexEstimateMotionRegionsQCOM (GLuint ref, GLuint target, GLuint output, GLuint mask);
#endif
#endif /* GL_QCOM_motion_estimation */
#ifndef GL_QCOM_perfmon_global_mode
#define GL_QCOM_perfmon_global_mode 1
#define GL_PERFMON_GLOBAL_MODE_QCOM 0x8FA0
#endif /* GL_QCOM_perfmon_global_mode */
#ifndef GL_QCOM_render_shared_exponent
#define GL_QCOM_render_shared_exponent 1
#endif /* GL_QCOM_render_shared_exponent */
#ifndef GL_QCOM_shader_framebuffer_fetch_noncoherent
#define GL_QCOM_shader_framebuffer_fetch_noncoherent 1
#define GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM 0x96A2
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFETCHBARRIERQCOMPROC) (void);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glFramebufferFetchBarrierQCOM (void);
#endif
#endif /* GL_QCOM_shader_framebuffer_fetch_noncoherent */
#ifndef GL_QCOM_shader_framebuffer_fetch_rate
#define GL_QCOM_shader_framebuffer_fetch_rate 1
#endif /* GL_QCOM_shader_framebuffer_fetch_rate */
#ifndef GL_QCOM_shading_rate
#define GL_QCOM_shading_rate 1
#define GL_SHADING_RATE_QCOM 0x96A4
#define GL_SHADING_RATE_PRESERVE_ASPECT_RATIO_QCOM 0x96A5
#define GL_SHADING_RATE_1X1_PIXELS_QCOM 0x96A6
#define GL_SHADING_RATE_1X2_PIXELS_QCOM 0x96A7
#define GL_SHADING_RATE_2X1_PIXELS_QCOM 0x96A8
#define GL_SHADING_RATE_2X2_PIXELS_QCOM 0x96A9
#define GL_SHADING_RATE_4X2_PIXELS_QCOM 0x96AC
#define GL_SHADING_RATE_4X4_PIXELS_QCOM 0x96AE
typedef void (GL_APIENTRYP PFNGLSHADINGRATEQCOMPROC) (GLenum rate);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glShadingRateQCOM (GLenum rate);
#endif
#endif /* GL_QCOM_shading_rate */
#ifndef GL_QCOM_texture_foveated
#define GL_QCOM_texture_foveated 1
#define GL_TEXTURE_FOVEATED_FEATURE_BITS_QCOM 0x8BFB
#define GL_TEXTURE_FOVEATED_MIN_PIXEL_DENSITY_QCOM 0x8BFC
#define GL_TEXTURE_FOVEATED_FEATURE_QUERY_QCOM 0x8BFD
#define GL_TEXTURE_FOVEATED_NUM_FOCAL_POINTS_QUERY_QCOM 0x8BFE
#define GL_FRAMEBUFFER_INCOMPLETE_FOVEATION_QCOM 0x8BFF
typedef void (GL_APIENTRYP PFNGLTEXTUREFOVEATIONPARAMETERSQCOMPROC) (GLuint texture, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glTextureFoveationParametersQCOM (GLuint texture, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea);
#endif
#endif /* GL_QCOM_texture_foveated */
#ifndef GL_QCOM_texture_foveated2
#define GL_QCOM_texture_foveated2 1
#define GL_TEXTURE_FOVEATED_CUTOFF_DENSITY_QCOM 0x96A0
#endif /* GL_QCOM_texture_foveated2 */
#ifndef GL_QCOM_texture_foveated_subsampled_layout
#define GL_QCOM_texture_foveated_subsampled_layout 1
#define GL_FOVEATION_SUBSAMPLED_LAYOUT_METHOD_BIT_QCOM 0x00000004
#define GL_MAX_SHADER_SUBSAMPLED_IMAGE_UNITS_QCOM 0x8FA1
#endif /* GL_QCOM_texture_foveated_subsampled_layout */
#ifndef GL_QCOM_tiled_rendering
#define GL_QCOM_tiled_rendering 1
#define GL_COLOR_BUFFER_BIT0_QCOM 0x00000001
#define GL_COLOR_BUFFER_BIT1_QCOM 0x00000002
#define GL_COLOR_BUFFER_BIT2_QCOM 0x00000004
#define GL_COLOR_BUFFER_BIT3_QCOM 0x00000008
#define GL_COLOR_BUFFER_BIT4_QCOM 0x00000010
#define GL_COLOR_BUFFER_BIT5_QCOM 0x00000020
#define GL_COLOR_BUFFER_BIT6_QCOM 0x00000040
#define GL_COLOR_BUFFER_BIT7_QCOM 0x00000080
#define GL_DEPTH_BUFFER_BIT0_QCOM 0x00000100
#define GL_DEPTH_BUFFER_BIT1_QCOM 0x00000200
#define GL_DEPTH_BUFFER_BIT2_QCOM 0x00000400
#define GL_DEPTH_BUFFER_BIT3_QCOM 0x00000800
#define GL_DEPTH_BUFFER_BIT4_QCOM 0x00001000
#define GL_DEPTH_BUFFER_BIT5_QCOM 0x00002000
#define GL_DEPTH_BUFFER_BIT6_QCOM 0x00004000
#define GL_DEPTH_BUFFER_BIT7_QCOM 0x00008000
#define GL_STENCIL_BUFFER_BIT0_QCOM 0x00010000
#define GL_STENCIL_BUFFER_BIT1_QCOM 0x00020000
#define GL_STENCIL_BUFFER_BIT2_QCOM 0x00040000
#define GL_STENCIL_BUFFER_BIT3_QCOM 0x00080000
#define GL_STENCIL_BUFFER_BIT4_QCOM 0x00100000
#define GL_STENCIL_BUFFER_BIT5_QCOM 0x00200000
#define GL_STENCIL_BUFFER_BIT6_QCOM 0x00400000
#define GL_STENCIL_BUFFER_BIT7_QCOM 0x00800000
#define GL_MULTISAMPLE_BUFFER_BIT0_QCOM 0x01000000
#define GL_MULTISAMPLE_BUFFER_BIT1_QCOM 0x02000000
#define GL_MULTISAMPLE_BUFFER_BIT2_QCOM 0x04000000
#define GL_MULTISAMPLE_BUFFER_BIT3_QCOM 0x08000000
#define GL_MULTISAMPLE_BUFFER_BIT4_QCOM 0x10000000
#define GL_MULTISAMPLE_BUFFER_BIT5_QCOM 0x20000000
#define GL_MULTISAMPLE_BUFFER_BIT6_QCOM 0x40000000
#define GL_MULTISAMPLE_BUFFER_BIT7_QCOM 0x80000000
typedef void (GL_APIENTRYP PFNGLSTARTTILINGQCOMPROC) (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask);
typedef void (GL_APIENTRYP PFNGLENDTILINGQCOMPROC) (GLbitfield preserveMask);
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glStartTilingQCOM (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask);
GL_APICALL void GL_APIENTRY glEndTilingQCOM (GLbitfield preserveMask);
#endif
#endif /* GL_QCOM_tiled_rendering */
#ifndef GL_QCOM_writeonly_rendering
#define GL_QCOM_writeonly_rendering 1
#define GL_WRITEONLY_RENDERING_QCOM 0x8823
#endif /* GL_QCOM_writeonly_rendering */
#ifndef GL_VIV_shader_binary
#define GL_VIV_shader_binary 1
#define GL_SHADER_BINARY_VIV 0x8FC4
#endif /* GL_VIV_shader_binary */
#ifdef __cplusplus
}
#endif
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_version.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_version.h
*
* This header defines the current SDL version.
*/
#ifndef SDL_version_h_
#define SDL_version_h_
#include "SDL_stdinc.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/**
* Information about the version of SDL in use.
*
* Represents the library's version as three levels: major revision
* (increments with massive changes, additions, and enhancements),
* minor revision (increments with backwards-compatible changes to the
* major revision), and patchlevel (increments with fixes to the minor
* revision).
*
* \sa SDL_VERSION
* \sa SDL_GetVersion
*/
typedef struct SDL_version
{
Uint8 major; /**< major version */
Uint8 minor; /**< minor version */
Uint8 patch; /**< update version */
} SDL_version;
/* Printable format: "%d.%d.%d", MAJOR, MINOR, PATCHLEVEL
*/
#define SDL_MAJOR_VERSION 2
#define SDL_MINOR_VERSION 26
#define SDL_PATCHLEVEL 1
/**
* Macro to determine SDL version program was compiled against.
*
 * This macro fills in an SDL_version structure with the version of the
* library you compiled against. This is determined by what header the
* compiler uses. Note that if you dynamically linked the library, you might
* have a slightly newer or older version at runtime. That version can be
* determined with SDL_GetVersion(), which, unlike SDL_VERSION(),
* is not a macro.
*
 * \param x A pointer to an SDL_version struct to initialize.
*
* \sa SDL_version
* \sa SDL_GetVersion
*/
#define SDL_VERSION(x) \
{ \
(x)->major = SDL_MAJOR_VERSION; \
(x)->minor = SDL_MINOR_VERSION; \
(x)->patch = SDL_PATCHLEVEL; \
}
/* TODO: Remove this whole block in SDL 3 */
#if SDL_MAJOR_VERSION < 3
/**
* This macro turns the version numbers into a numeric value:
* \verbatim
(1,2,3) -> (1203)
\endverbatim
*
* This assumes that there will never be more than 100 patchlevels.
*
* In versions higher than 2.9.0, the minor version overflows into
* the thousands digit: for example, 2.23.0 is encoded as 4300,
 * and 2.255.99 would be encoded as 27599.
* This macro will not be available in SDL 3.x.
*/
#define SDL_VERSIONNUM(X, Y, Z) \
((X)*1000 + (Y)*100 + (Z))
/**
* This is the version number macro for the current SDL version.
*
* In versions higher than 2.9.0, the minor version overflows into
* the thousands digit: for example, 2.23.0 is encoded as 4300.
* This macro will not be available in SDL 3.x.
*
* Deprecated, use SDL_VERSION_ATLEAST or SDL_VERSION instead.
*/
#define SDL_COMPILEDVERSION \
SDL_VERSIONNUM(SDL_MAJOR_VERSION, SDL_MINOR_VERSION, SDL_PATCHLEVEL)
#endif /* SDL_MAJOR_VERSION < 3 */
/**
* This macro will evaluate to true if compiled with SDL at least X.Y.Z.
*/
#define SDL_VERSION_ATLEAST(X, Y, Z) \
((SDL_MAJOR_VERSION >= X) && \
(SDL_MAJOR_VERSION > X || SDL_MINOR_VERSION >= Y) && \
(SDL_MAJOR_VERSION > X || SDL_MINOR_VERSION > Y || SDL_PATCHLEVEL >= Z))
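/*
 * Usage sketch (illustrative only): SDL_VERSION_ATLEAST is evaluated at
 * compile time against the headers in use, so its usual role is to guard
 * code that relies on APIs of a given vintage. For example, assuming a
 * feature that first shipped in SDL 2.0.16:
 *
 * \code
 * #if SDL_VERSION_ATLEAST(2, 0, 16)
 *     // Compiled against SDL 2.0.16 or newer; 2.0.16-era APIs exist here.
 * #endif
 * \endcode
 */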
/**
* Get the version of SDL that is linked against your program.
*
* If you are linking to SDL dynamically, then it is possible that the current
* version will be different than the version you compiled against. This
* function returns the current version, while SDL_VERSION() is a macro that
* tells you what version you compiled with.
*
* This function may be called safely at any time, even before SDL_Init().
*
* \param ver the SDL_version structure that contains the version information
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetRevision
*/
extern DECLSPEC void SDLCALL SDL_GetVersion(SDL_version * ver);
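/*
 * Usage sketch (illustrative only): the common pattern is to compare the
 * version the program was compiled against (SDL_VERSION, a macro) with the
 * version it is actually linked against (SDL_GetVersion, a function).
 * SDL_Log, declared in SDL_log.h, is assumed here purely for output:
 *
 * \code
 * SDL_version compiled;
 * SDL_version linked;
 *
 * SDL_VERSION(&compiled);      // version from the headers at compile time
 * SDL_GetVersion(&linked);     // version of the library linked at run time
 * SDL_Log("Compiled against SDL %u.%u.%u, linked against SDL %u.%u.%u",
 *         compiled.major, compiled.minor, compiled.patch,
 *         linked.major, linked.minor, linked.patch);
 * \endcode
 */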
/**
* Get the code revision of SDL that is linked against your program.
*
* This value is the revision of the code you are linked with and may be
* different from the code you are compiling with, which is found in the
* constant SDL_REVISION.
*
 * The revision is an arbitrary string (a hash value) uniquely identifying the
* exact revision of the SDL library in use, and is only useful in comparing
* against other revisions. It is NOT an incrementing number.
*
* If SDL wasn't built from a git repository with the appropriate tools, this
* will return an empty string.
*
* Prior to SDL 2.0.16, before development moved to GitHub, this returned a
* hash for a Mercurial repository.
*
* You shouldn't use this function for anything but logging it for debugging
* purposes. The string is not intended to be reliable in any way.
*
* \returns an arbitrary string, uniquely identifying the exact revision of
* the SDL library in use.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetVersion
*/
extern DECLSPEC const char *SDLCALL SDL_GetRevision(void);
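/*
 * Usage sketch (illustrative only), matching the documented intent of only
 * logging the revision for debugging:
 *
 * \code
 * SDL_Log("SDL revision: %s", SDL_GetRevision());
 * \endcode
 */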
/**
* Obsolete function, do not use.
*
* When SDL was hosted in a Mercurial repository, and was built carefully,
* this would return the revision number that the build was created from. This
* number was not reliable for several reasons, but more importantly, SDL is
* now hosted in a git repository, which does not offer numbers at all, only
* hashes. This function only ever returns zero now. Don't use it.
*
* Before SDL 2.0.16, this might have returned an unreliable, but non-zero
* number.
*
* \deprecated Use SDL_GetRevision() instead; if SDL was carefully built, it
* will return a git hash.
*
* \returns zero, always, in modern SDL releases.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetRevision
*/
extern SDL_DEPRECATED DECLSPEC int SDLCALL SDL_GetRevisionNumber(void);
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_version_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_keycode.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_keycode.h
*
* Defines constants which identify keyboard keys and modifiers.
*/
#ifndef SDL_keycode_h_
#define SDL_keycode_h_
#include "SDL_stdinc.h"
#include "SDL_scancode.h"
/**
* \brief The SDL virtual key representation.
*
* Values of this type are used to represent keyboard keys using the current
* layout of the keyboard. These values include Unicode values representing
* the unmodified character that would be generated by pressing the key, or
* an SDLK_* constant for those keys that do not generate characters.
*
* A special exception is the number keys at the top of the keyboard which
* always map to SDLK_0...SDLK_9, regardless of layout.
*/
typedef Sint32 SDL_Keycode;
#define SDLK_SCANCODE_MASK (1<<30)
#define SDL_SCANCODE_TO_KEYCODE(X) (X | SDLK_SCANCODE_MASK)
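/*
 * Usage sketch (illustrative only): SDLK_SCANCODE_MASK distinguishes
 * keycodes that are plain Unicode values from keycodes derived from
 * scancodes, so the bit can be tested and cleared to recover the scancode.
 * For example:
 *
 * \code
 * SDL_Keycode k1 = SDLK_a;                                   // Unicode 'a'
 * SDL_Keycode k2 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F1); // == SDLK_F1
 *
 * if (k2 & SDLK_SCANCODE_MASK) {
 *     // k2 was built from a scancode; clearing the mask bit recovers it.
 *     SDL_Scancode sc = (SDL_Scancode)(k2 & ~SDLK_SCANCODE_MASK);
 * }
 * \endcode
 *
 * Note: for layout-aware mappings, SDL's supported route is
 * SDL_GetScancodeFromKey() / SDL_GetKeyFromScancode() in SDL_keyboard.h.
 */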
typedef enum
{
SDLK_UNKNOWN = 0,
SDLK_RETURN = '\r',
SDLK_ESCAPE = '\x1B',
SDLK_BACKSPACE = '\b',
SDLK_TAB = '\t',
SDLK_SPACE = ' ',
SDLK_EXCLAIM = '!',
SDLK_QUOTEDBL = '"',
SDLK_HASH = '#',
SDLK_PERCENT = '%',
SDLK_DOLLAR = '$',
SDLK_AMPERSAND = '&',
SDLK_QUOTE = '\'',
SDLK_LEFTPAREN = '(',
SDLK_RIGHTPAREN = ')',
SDLK_ASTERISK = '*',
SDLK_PLUS = '+',
SDLK_COMMA = ',',
SDLK_MINUS = '-',
SDLK_PERIOD = '.',
SDLK_SLASH = '/',
SDLK_0 = '0',
SDLK_1 = '1',
SDLK_2 = '2',
SDLK_3 = '3',
SDLK_4 = '4',
SDLK_5 = '5',
SDLK_6 = '6',
SDLK_7 = '7',
SDLK_8 = '8',
SDLK_9 = '9',
SDLK_COLON = ':',
SDLK_SEMICOLON = ';',
SDLK_LESS = '<',
SDLK_EQUALS = '=',
SDLK_GREATER = '>',
SDLK_QUESTION = '?',
SDLK_AT = '@',
/*
Skip uppercase letters
*/
SDLK_LEFTBRACKET = '[',
SDLK_BACKSLASH = '\\',
SDLK_RIGHTBRACKET = ']',
SDLK_CARET = '^',
SDLK_UNDERSCORE = '_',
SDLK_BACKQUOTE = '`',
SDLK_a = 'a',
SDLK_b = 'b',
SDLK_c = 'c',
SDLK_d = 'd',
SDLK_e = 'e',
SDLK_f = 'f',
SDLK_g = 'g',
SDLK_h = 'h',
SDLK_i = 'i',
SDLK_j = 'j',
SDLK_k = 'k',
SDLK_l = 'l',
SDLK_m = 'm',
SDLK_n = 'n',
SDLK_o = 'o',
SDLK_p = 'p',
SDLK_q = 'q',
SDLK_r = 'r',
SDLK_s = 's',
SDLK_t = 't',
SDLK_u = 'u',
SDLK_v = 'v',
SDLK_w = 'w',
SDLK_x = 'x',
SDLK_y = 'y',
SDLK_z = 'z',
SDLK_CAPSLOCK = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CAPSLOCK),
SDLK_F1 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F1),
SDLK_F2 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F2),
SDLK_F3 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F3),
SDLK_F4 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F4),
SDLK_F5 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F5),
SDLK_F6 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F6),
SDLK_F7 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F7),
SDLK_F8 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F8),
SDLK_F9 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F9),
SDLK_F10 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F10),
SDLK_F11 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F11),
SDLK_F12 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F12),
SDLK_PRINTSCREEN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PRINTSCREEN),
SDLK_SCROLLLOCK = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SCROLLLOCK),
SDLK_PAUSE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PAUSE),
SDLK_INSERT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_INSERT),
SDLK_HOME = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_HOME),
SDLK_PAGEUP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PAGEUP),
SDLK_DELETE = '\x7F',
SDLK_END = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_END),
SDLK_PAGEDOWN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PAGEDOWN),
SDLK_RIGHT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RIGHT),
SDLK_LEFT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_LEFT),
SDLK_DOWN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_DOWN),
SDLK_UP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_UP),
SDLK_NUMLOCKCLEAR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_NUMLOCKCLEAR),
SDLK_KP_DIVIDE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_DIVIDE),
SDLK_KP_MULTIPLY = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MULTIPLY),
SDLK_KP_MINUS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MINUS),
SDLK_KP_PLUS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_PLUS),
SDLK_KP_ENTER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_ENTER),
SDLK_KP_1 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_1),
SDLK_KP_2 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_2),
SDLK_KP_3 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_3),
SDLK_KP_4 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_4),
SDLK_KP_5 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_5),
SDLK_KP_6 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_6),
SDLK_KP_7 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_7),
SDLK_KP_8 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_8),
SDLK_KP_9 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_9),
SDLK_KP_0 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_0),
SDLK_KP_PERIOD = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_PERIOD),
SDLK_APPLICATION = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_APPLICATION),
SDLK_POWER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_POWER),
SDLK_KP_EQUALS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_EQUALS),
SDLK_F13 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F13),
SDLK_F14 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F14),
SDLK_F15 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F15),
SDLK_F16 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F16),
SDLK_F17 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F17),
SDLK_F18 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F18),
SDLK_F19 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F19),
SDLK_F20 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F20),
SDLK_F21 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F21),
SDLK_F22 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F22),
SDLK_F23 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F23),
SDLK_F24 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_F24),
SDLK_EXECUTE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_EXECUTE),
SDLK_HELP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_HELP),
SDLK_MENU = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_MENU),
SDLK_SELECT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SELECT),
SDLK_STOP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_STOP),
SDLK_AGAIN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AGAIN),
SDLK_UNDO = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_UNDO),
SDLK_CUT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CUT),
SDLK_COPY = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_COPY),
SDLK_PASTE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PASTE),
SDLK_FIND = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_FIND),
SDLK_MUTE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_MUTE),
SDLK_VOLUMEUP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_VOLUMEUP),
SDLK_VOLUMEDOWN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_VOLUMEDOWN),
SDLK_KP_COMMA = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_COMMA),
SDLK_KP_EQUALSAS400 =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_EQUALSAS400),
SDLK_ALTERASE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_ALTERASE),
SDLK_SYSREQ = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SYSREQ),
SDLK_CANCEL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CANCEL),
SDLK_CLEAR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CLEAR),
SDLK_PRIOR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_PRIOR),
SDLK_RETURN2 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RETURN2),
SDLK_SEPARATOR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SEPARATOR),
SDLK_OUT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_OUT),
SDLK_OPER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_OPER),
SDLK_CLEARAGAIN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CLEARAGAIN),
SDLK_CRSEL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CRSEL),
SDLK_EXSEL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_EXSEL),
SDLK_KP_00 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_00),
SDLK_KP_000 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_000),
SDLK_THOUSANDSSEPARATOR =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_THOUSANDSSEPARATOR),
SDLK_DECIMALSEPARATOR =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_DECIMALSEPARATOR),
SDLK_CURRENCYUNIT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CURRENCYUNIT),
SDLK_CURRENCYSUBUNIT =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CURRENCYSUBUNIT),
SDLK_KP_LEFTPAREN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_LEFTPAREN),
SDLK_KP_RIGHTPAREN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_RIGHTPAREN),
SDLK_KP_LEFTBRACE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_LEFTBRACE),
SDLK_KP_RIGHTBRACE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_RIGHTBRACE),
SDLK_KP_TAB = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_TAB),
SDLK_KP_BACKSPACE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_BACKSPACE),
SDLK_KP_A = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_A),
SDLK_KP_B = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_B),
SDLK_KP_C = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_C),
SDLK_KP_D = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_D),
SDLK_KP_E = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_E),
SDLK_KP_F = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_F),
SDLK_KP_XOR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_XOR),
SDLK_KP_POWER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_POWER),
SDLK_KP_PERCENT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_PERCENT),
SDLK_KP_LESS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_LESS),
SDLK_KP_GREATER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_GREATER),
SDLK_KP_AMPERSAND = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_AMPERSAND),
SDLK_KP_DBLAMPERSAND =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_DBLAMPERSAND),
SDLK_KP_VERTICALBAR =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_VERTICALBAR),
SDLK_KP_DBLVERTICALBAR =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_DBLVERTICALBAR),
SDLK_KP_COLON = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_COLON),
SDLK_KP_HASH = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_HASH),
SDLK_KP_SPACE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_SPACE),
SDLK_KP_AT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_AT),
SDLK_KP_EXCLAM = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_EXCLAM),
SDLK_KP_MEMSTORE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMSTORE),
SDLK_KP_MEMRECALL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMRECALL),
SDLK_KP_MEMCLEAR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMCLEAR),
SDLK_KP_MEMADD = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMADD),
SDLK_KP_MEMSUBTRACT =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMSUBTRACT),
SDLK_KP_MEMMULTIPLY =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMMULTIPLY),
SDLK_KP_MEMDIVIDE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_MEMDIVIDE),
SDLK_KP_PLUSMINUS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_PLUSMINUS),
SDLK_KP_CLEAR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_CLEAR),
SDLK_KP_CLEARENTRY = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_CLEARENTRY),
SDLK_KP_BINARY = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_BINARY),
SDLK_KP_OCTAL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_OCTAL),
SDLK_KP_DECIMAL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_DECIMAL),
SDLK_KP_HEXADECIMAL =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KP_HEXADECIMAL),
SDLK_LCTRL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_LCTRL),
SDLK_LSHIFT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_LSHIFT),
SDLK_LALT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_LALT),
SDLK_LGUI = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_LGUI),
SDLK_RCTRL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RCTRL),
SDLK_RSHIFT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RSHIFT),
SDLK_RALT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RALT),
SDLK_RGUI = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_RGUI),
SDLK_MODE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_MODE),
SDLK_AUDIONEXT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIONEXT),
SDLK_AUDIOPREV = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOPREV),
SDLK_AUDIOSTOP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOSTOP),
SDLK_AUDIOPLAY = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOPLAY),
SDLK_AUDIOMUTE = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOMUTE),
SDLK_MEDIASELECT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_MEDIASELECT),
SDLK_WWW = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_WWW),
SDLK_MAIL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_MAIL),
SDLK_CALCULATOR = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CALCULATOR),
SDLK_COMPUTER = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_COMPUTER),
SDLK_AC_SEARCH = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_SEARCH),
SDLK_AC_HOME = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_HOME),
SDLK_AC_BACK = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_BACK),
SDLK_AC_FORWARD = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_FORWARD),
SDLK_AC_STOP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_STOP),
SDLK_AC_REFRESH = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_REFRESH),
SDLK_AC_BOOKMARKS = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AC_BOOKMARKS),
SDLK_BRIGHTNESSDOWN =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_BRIGHTNESSDOWN),
SDLK_BRIGHTNESSUP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_BRIGHTNESSUP),
SDLK_DISPLAYSWITCH = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_DISPLAYSWITCH),
SDLK_KBDILLUMTOGGLE =
SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KBDILLUMTOGGLE),
SDLK_KBDILLUMDOWN = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KBDILLUMDOWN),
SDLK_KBDILLUMUP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_KBDILLUMUP),
SDLK_EJECT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_EJECT),
SDLK_SLEEP = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SLEEP),
SDLK_APP1 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_APP1),
SDLK_APP2 = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_APP2),
SDLK_AUDIOREWIND = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOREWIND),
SDLK_AUDIOFASTFORWARD = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_AUDIOFASTFORWARD),
SDLK_SOFTLEFT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SOFTLEFT),
SDLK_SOFTRIGHT = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_SOFTRIGHT),
SDLK_CALL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_CALL),
SDLK_ENDCALL = SDL_SCANCODE_TO_KEYCODE(SDL_SCANCODE_ENDCALL)
} SDL_KeyCode;
/**
* \brief Enumeration of valid key mods (possibly OR'd together).
*/
typedef enum
{
KMOD_NONE = 0x0000,
KMOD_LSHIFT = 0x0001,
KMOD_RSHIFT = 0x0002,
KMOD_LCTRL = 0x0040,
KMOD_RCTRL = 0x0080,
KMOD_LALT = 0x0100,
KMOD_RALT = 0x0200,
KMOD_LGUI = 0x0400,
KMOD_RGUI = 0x0800,
KMOD_NUM = 0x1000,
KMOD_CAPS = 0x2000,
KMOD_MODE = 0x4000,
KMOD_SCROLL = 0x8000,
KMOD_CTRL = KMOD_LCTRL | KMOD_RCTRL,
KMOD_SHIFT = KMOD_LSHIFT | KMOD_RSHIFT,
KMOD_ALT = KMOD_LALT | KMOD_RALT,
KMOD_GUI = KMOD_LGUI | KMOD_RGUI,
KMOD_RESERVED = KMOD_SCROLL /* This is for source-level compatibility with SDL 2.0.0. */
} SDL_Keymod;
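/**
 * Example (illustrative sketch, not part of the original header): because the
 * mods may be OR'd together, test them with a bitwise AND rather than with
 * equality. SDL_GetModState() is declared in SDL_keyboard.h.
 *
 * ```c
 * SDL_Keymod mod = SDL_GetModState();
 * if (mod & KMOD_CTRL) {
 *     // either Ctrl key is held, possibly along with other modifiers
 * }
 * ```
 */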
#endif /* SDL_keycode_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_opengles2_gl2.h | #ifndef __gles2_gl2_h_
#define __gles2_gl2_h_ 1
#ifdef __cplusplus
extern "C" {
#endif
/*
** Copyright 2013-2020 The Khronos Group Inc.
** SPDX-License-Identifier: MIT
**
** This header is generated from the Khronos OpenGL / OpenGL ES XML
** API Registry. The current version of the Registry, generator scripts
** used to make the header, and the header can be found at
** https://github.com/KhronosGroup/OpenGL-Registry
*/
/*#include <GLES2/gl2platform.h>*/
#ifndef GL_APIENTRYP
#define GL_APIENTRYP GL_APIENTRY*
#endif
#ifndef GL_GLES_PROTOTYPES
#define GL_GLES_PROTOTYPES 1
#endif
/* Generated on date 20220530 */
/* Generated C header for:
* API: gles2
* Profile: common
* Versions considered: 2\.[0-9]
* Versions emitted: .*
* Default extensions included: None
* Additional extensions included: _nomatch_^
* Extensions removed: _nomatch_^
*/
#ifndef GL_ES_VERSION_2_0
#define GL_ES_VERSION_2_0 1
/*#include <KHR/khrplatform.h>*/
typedef khronos_int8_t GLbyte;
typedef khronos_float_t GLclampf;
typedef khronos_int32_t GLfixed;
typedef khronos_int16_t GLshort;
typedef khronos_uint16_t GLushort;
typedef void GLvoid;
typedef struct __GLsync *GLsync;
typedef khronos_int64_t GLint64;
typedef khronos_uint64_t GLuint64;
typedef unsigned int GLenum;
typedef unsigned int GLuint;
typedef char GLchar;
typedef khronos_float_t GLfloat;
typedef khronos_ssize_t GLsizeiptr;
typedef khronos_intptr_t GLintptr;
typedef unsigned int GLbitfield;
typedef int GLint;
typedef unsigned char GLboolean;
typedef int GLsizei;
typedef khronos_uint8_t GLubyte;
#define GL_DEPTH_BUFFER_BIT 0x00000100
#define GL_STENCIL_BUFFER_BIT 0x00000400
#define GL_COLOR_BUFFER_BIT 0x00004000
#define GL_FALSE 0
#define GL_TRUE 1
#define GL_POINTS 0x0000
#define GL_LINES 0x0001
#define GL_LINE_LOOP 0x0002
#define GL_LINE_STRIP 0x0003
#define GL_TRIANGLES 0x0004
#define GL_TRIANGLE_STRIP 0x0005
#define GL_TRIANGLE_FAN 0x0006
#define GL_ZERO 0
#define GL_ONE 1
#define GL_SRC_COLOR 0x0300
#define GL_ONE_MINUS_SRC_COLOR 0x0301
#define GL_SRC_ALPHA 0x0302
#define GL_ONE_MINUS_SRC_ALPHA 0x0303
#define GL_DST_ALPHA 0x0304
#define GL_ONE_MINUS_DST_ALPHA 0x0305
#define GL_DST_COLOR 0x0306
#define GL_ONE_MINUS_DST_COLOR 0x0307
#define GL_SRC_ALPHA_SATURATE 0x0308
#define GL_FUNC_ADD 0x8006
#define GL_BLEND_EQUATION 0x8009
#define GL_BLEND_EQUATION_RGB 0x8009
#define GL_BLEND_EQUATION_ALPHA 0x883D
#define GL_FUNC_SUBTRACT 0x800A
#define GL_FUNC_REVERSE_SUBTRACT 0x800B
#define GL_BLEND_DST_RGB 0x80C8
#define GL_BLEND_SRC_RGB 0x80C9
#define GL_BLEND_DST_ALPHA 0x80CA
#define GL_BLEND_SRC_ALPHA 0x80CB
#define GL_CONSTANT_COLOR 0x8001
#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
#define GL_CONSTANT_ALPHA 0x8003
#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
#define GL_BLEND_COLOR 0x8005
#define GL_ARRAY_BUFFER 0x8892
#define GL_ELEMENT_ARRAY_BUFFER 0x8893
#define GL_ARRAY_BUFFER_BINDING 0x8894
#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
#define GL_STREAM_DRAW 0x88E0
#define GL_STATIC_DRAW 0x88E4
#define GL_DYNAMIC_DRAW 0x88E8
#define GL_BUFFER_SIZE 0x8764
#define GL_BUFFER_USAGE 0x8765
#define GL_CURRENT_VERTEX_ATTRIB 0x8626
#define GL_FRONT 0x0404
#define GL_BACK 0x0405
#define GL_FRONT_AND_BACK 0x0408
#define GL_TEXTURE_2D 0x0DE1
#define GL_CULL_FACE 0x0B44
#define GL_BLEND 0x0BE2
#define GL_DITHER 0x0BD0
#define GL_STENCIL_TEST 0x0B90
#define GL_DEPTH_TEST 0x0B71
#define GL_SCISSOR_TEST 0x0C11
#define GL_POLYGON_OFFSET_FILL 0x8037
#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
#define GL_SAMPLE_COVERAGE 0x80A0
#define GL_NO_ERROR 0
#define GL_INVALID_ENUM 0x0500
#define GL_INVALID_VALUE 0x0501
#define GL_INVALID_OPERATION 0x0502
#define GL_OUT_OF_MEMORY 0x0505
#define GL_CW 0x0900
#define GL_CCW 0x0901
#define GL_LINE_WIDTH 0x0B21
#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
#define GL_CULL_FACE_MODE 0x0B45
#define GL_FRONT_FACE 0x0B46
#define GL_DEPTH_RANGE 0x0B70
#define GL_DEPTH_WRITEMASK 0x0B72
#define GL_DEPTH_CLEAR_VALUE 0x0B73
#define GL_DEPTH_FUNC 0x0B74
#define GL_STENCIL_CLEAR_VALUE 0x0B91
#define GL_STENCIL_FUNC 0x0B92
#define GL_STENCIL_FAIL 0x0B94
#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
#define GL_STENCIL_REF 0x0B97
#define GL_STENCIL_VALUE_MASK 0x0B93
#define GL_STENCIL_WRITEMASK 0x0B98
#define GL_STENCIL_BACK_FUNC 0x8800
#define GL_STENCIL_BACK_FAIL 0x8801
#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
#define GL_STENCIL_BACK_REF 0x8CA3
#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
#define GL_VIEWPORT 0x0BA2
#define GL_SCISSOR_BOX 0x0C10
#define GL_COLOR_CLEAR_VALUE 0x0C22
#define GL_COLOR_WRITEMASK 0x0C23
#define GL_UNPACK_ALIGNMENT 0x0CF5
#define GL_PACK_ALIGNMENT 0x0D05
#define GL_MAX_TEXTURE_SIZE 0x0D33
#define GL_MAX_VIEWPORT_DIMS 0x0D3A
#define GL_SUBPIXEL_BITS 0x0D50
#define GL_RED_BITS 0x0D52
#define GL_GREEN_BITS 0x0D53
#define GL_BLUE_BITS 0x0D54
#define GL_ALPHA_BITS 0x0D55
#define GL_DEPTH_BITS 0x0D56
#define GL_STENCIL_BITS 0x0D57
#define GL_POLYGON_OFFSET_UNITS 0x2A00
#define GL_POLYGON_OFFSET_FACTOR 0x8038
#define GL_TEXTURE_BINDING_2D 0x8069
#define GL_SAMPLE_BUFFERS 0x80A8
#define GL_SAMPLES 0x80A9
#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
#define GL_DONT_CARE 0x1100
#define GL_FASTEST 0x1101
#define GL_NICEST 0x1102
#define GL_GENERATE_MIPMAP_HINT 0x8192
#define GL_BYTE 0x1400
#define GL_UNSIGNED_BYTE 0x1401
#define GL_SHORT 0x1402
#define GL_UNSIGNED_SHORT 0x1403
#define GL_INT 0x1404
#define GL_UNSIGNED_INT 0x1405
#define GL_FLOAT 0x1406
#define GL_FIXED 0x140C
#define GL_DEPTH_COMPONENT 0x1902
#define GL_ALPHA 0x1906
#define GL_RGB 0x1907
#define GL_RGBA 0x1908
#define GL_LUMINANCE 0x1909
#define GL_LUMINANCE_ALPHA 0x190A
#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
#define GL_UNSIGNED_SHORT_5_6_5 0x8363
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
#define GL_MAX_VERTEX_ATTRIBS 0x8869
#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
#define GL_MAX_VARYING_VECTORS 0x8DFC
#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
#define GL_SHADER_TYPE 0x8B4F
#define GL_DELETE_STATUS 0x8B80
#define GL_LINK_STATUS 0x8B82
#define GL_VALIDATE_STATUS 0x8B83
#define GL_ATTACHED_SHADERS 0x8B85
#define GL_ACTIVE_UNIFORMS 0x8B86
#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
#define GL_ACTIVE_ATTRIBUTES 0x8B89
#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
#define GL_SHADING_LANGUAGE_VERSION 0x8B8C
#define GL_CURRENT_PROGRAM 0x8B8D
#define GL_NEVER 0x0200
#define GL_LESS 0x0201
#define GL_EQUAL 0x0202
#define GL_LEQUAL 0x0203
#define GL_GREATER 0x0204
#define GL_NOTEQUAL 0x0205
#define GL_GEQUAL 0x0206
#define GL_ALWAYS 0x0207
#define GL_KEEP 0x1E00
#define GL_REPLACE 0x1E01
#define GL_INCR 0x1E02
#define GL_DECR 0x1E03
#define GL_INVERT 0x150A
#define GL_INCR_WRAP 0x8507
#define GL_DECR_WRAP 0x8508
#define GL_VENDOR 0x1F00
#define GL_RENDERER 0x1F01
#define GL_VERSION 0x1F02
#define GL_EXTENSIONS 0x1F03
#define GL_NEAREST 0x2600
#define GL_LINEAR 0x2601
#define GL_NEAREST_MIPMAP_NEAREST 0x2700
#define GL_LINEAR_MIPMAP_NEAREST 0x2701
#define GL_NEAREST_MIPMAP_LINEAR 0x2702
#define GL_LINEAR_MIPMAP_LINEAR 0x2703
#define GL_TEXTURE_MAG_FILTER 0x2800
#define GL_TEXTURE_MIN_FILTER 0x2801
#define GL_TEXTURE_WRAP_S 0x2802
#define GL_TEXTURE_WRAP_T 0x2803
#define GL_TEXTURE 0x1702
#define GL_TEXTURE_CUBE_MAP 0x8513
#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
#define GL_TEXTURE0 0x84C0
#define GL_TEXTURE1 0x84C1
#define GL_TEXTURE2 0x84C2
#define GL_TEXTURE3 0x84C3
#define GL_TEXTURE4 0x84C4
#define GL_TEXTURE5 0x84C5
#define GL_TEXTURE6 0x84C6
#define GL_TEXTURE7 0x84C7
#define GL_TEXTURE8 0x84C8
#define GL_TEXTURE9 0x84C9
#define GL_TEXTURE10 0x84CA
#define GL_TEXTURE11 0x84CB
#define GL_TEXTURE12 0x84CC
#define GL_TEXTURE13 0x84CD
#define GL_TEXTURE14 0x84CE
#define GL_TEXTURE15 0x84CF
#define GL_TEXTURE16 0x84D0
#define GL_TEXTURE17 0x84D1
#define GL_TEXTURE18 0x84D2
#define GL_TEXTURE19 0x84D3
#define GL_TEXTURE20 0x84D4
#define GL_TEXTURE21 0x84D5
#define GL_TEXTURE22 0x84D6
#define GL_TEXTURE23 0x84D7
#define GL_TEXTURE24 0x84D8
#define GL_TEXTURE25 0x84D9
#define GL_TEXTURE26 0x84DA
#define GL_TEXTURE27 0x84DB
#define GL_TEXTURE28 0x84DC
#define GL_TEXTURE29 0x84DD
#define GL_TEXTURE30 0x84DE
#define GL_TEXTURE31 0x84DF
#define GL_ACTIVE_TEXTURE 0x84E0
#define GL_REPEAT 0x2901
#define GL_CLAMP_TO_EDGE 0x812F
#define GL_MIRRORED_REPEAT 0x8370
#define GL_FLOAT_VEC2 0x8B50
#define GL_FLOAT_VEC3 0x8B51
#define GL_FLOAT_VEC4 0x8B52
#define GL_INT_VEC2 0x8B53
#define GL_INT_VEC3 0x8B54
#define GL_INT_VEC4 0x8B55
#define GL_BOOL 0x8B56
#define GL_BOOL_VEC2 0x8B57
#define GL_BOOL_VEC3 0x8B58
#define GL_BOOL_VEC4 0x8B59
#define GL_FLOAT_MAT2 0x8B5A
#define GL_FLOAT_MAT3 0x8B5B
#define GL_FLOAT_MAT4 0x8B5C
#define GL_SAMPLER_2D 0x8B5E
#define GL_SAMPLER_CUBE 0x8B60
#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
#define GL_COMPILE_STATUS 0x8B81
#define GL_INFO_LOG_LENGTH 0x8B84
#define GL_SHADER_SOURCE_LENGTH 0x8B88
#define GL_SHADER_COMPILER 0x8DFA
#define GL_SHADER_BINARY_FORMATS 0x8DF8
#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
#define GL_LOW_FLOAT 0x8DF0
#define GL_MEDIUM_FLOAT 0x8DF1
#define GL_HIGH_FLOAT 0x8DF2
#define GL_LOW_INT 0x8DF3
#define GL_MEDIUM_INT 0x8DF4
#define GL_HIGH_INT 0x8DF5
#define GL_FRAMEBUFFER 0x8D40
#define GL_RENDERBUFFER 0x8D41
#define GL_RGBA4 0x8056
#define GL_RGB5_A1 0x8057
#define GL_RGB565 0x8D62
#define GL_DEPTH_COMPONENT16 0x81A5
#define GL_STENCIL_INDEX8 0x8D48
#define GL_RENDERBUFFER_WIDTH 0x8D42
#define GL_RENDERBUFFER_HEIGHT 0x8D43
#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
#define GL_RENDERBUFFER_RED_SIZE 0x8D50
#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51
#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52
#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
#define GL_COLOR_ATTACHMENT0 0x8CE0
#define GL_DEPTH_ATTACHMENT 0x8D00
#define GL_STENCIL_ATTACHMENT 0x8D20
#define GL_NONE 0
#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9
#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
#define GL_FRAMEBUFFER_BINDING 0x8CA6
#define GL_RENDERBUFFER_BINDING 0x8CA7
#define GL_MAX_RENDERBUFFER_SIZE 0x84E8
#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture);
typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader);
typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name);
typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer);
typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer);
typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture);
typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor);
typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target);
typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask);
typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d);
typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s);
typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader);
typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void);
typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type);
typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers);
typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers);
typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program);
typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers);
typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader);
typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures);
typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func);
typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag);
typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f);
typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader);
typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap);
typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index);
typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count);
typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices);
typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap);
typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index);
typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void);
typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode);
typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers);
typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target);
typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers);
typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures);
typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name);
typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data);
typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void);
typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data);
typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data);
typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params);
typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name);
typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params);
typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params);
typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer);
typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode);
typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer);
typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap);
typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer);
typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program);
typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer);
typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader);
typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture);
typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width);
typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program);
typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param);
typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units);
typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void);
typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert);
typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryFormat, const void *binary, GLsizei length);
typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask);
typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass);
typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param);
typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params);
typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0);
typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0);
typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1);
typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1);
typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2);
typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program);
typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v);
typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
#if GL_GLES_PROTOTYPES
GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture);
GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader);
GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name);
GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer);
GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer);
GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer);
GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture);
GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode);
GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha);
GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor);
GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target);
GL_APICALL void GL_APIENTRY glClear (GLbitfield mask);
GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d);
GL_APICALL void GL_APIENTRY glClearStencil (GLint s);
GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader);
GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
GL_APICALL GLuint GL_APIENTRY glCreateProgram (void);
GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type);
GL_APICALL void GL_APIENTRY glCullFace (GLenum mode);
GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers);
GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers);
GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program);
GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers);
GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader);
GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures);
GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func);
GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag);
GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f);
GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader);
GL_APICALL void GL_APIENTRY glDisable (GLenum cap);
GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index);
GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count);
GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices);
GL_APICALL void GL_APIENTRY glEnable (GLenum cap);
GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index);
GL_APICALL void GL_APIENTRY glFinish (void);
GL_APICALL void GL_APIENTRY glFlush (void);
GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode);
GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers);
GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target);
GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers);
GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers);
GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures);
GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name);
GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data);
GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params);
GL_APICALL GLenum GL_APIENTRY glGetError (void);
GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data);
GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data);
GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name);
GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params);
GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params);
GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params);
GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name);
GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params);
GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params);
GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer);
GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode);
GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer);
GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap);
GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer);
GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program);
GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer);
GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader);
GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture);
GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width);
GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program);
GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param);
GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units);
GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void);
GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert);
GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryFormat, const void *binary, GLsizei length);
GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask);
GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass);
GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param);
GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params);
GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param);
GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params);
GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0);
GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0);
GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1);
GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1);
GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2);
GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value);
GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GL_APICALL void GL_APIENTRY glUseProgram (GLuint program);
GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program);
GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x);
GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y);
GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z);
GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v);
GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height);
#endif
#endif /* GL_ES_VERSION_2_0 */
#ifdef __cplusplus
}
#endif
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_blendmode.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_blendmode.h
*
* Header file declaring the SDL_BlendMode enumeration
*/
#ifndef SDL_blendmode_h_
#define SDL_blendmode_h_
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief The blend mode used in SDL_RenderCopy() and drawing operations.
*/
typedef enum
{
SDL_BLENDMODE_NONE = 0x00000000, /**< no blending
dstRGBA = srcRGBA */
SDL_BLENDMODE_BLEND = 0x00000001, /**< alpha blending
dstRGB = (srcRGB * srcA) + (dstRGB * (1-srcA))
dstA = srcA + (dstA * (1-srcA)) */
SDL_BLENDMODE_ADD = 0x00000002, /**< additive blending
dstRGB = (srcRGB * srcA) + dstRGB
dstA = dstA */
SDL_BLENDMODE_MOD = 0x00000004, /**< color modulate
dstRGB = srcRGB * dstRGB
dstA = dstA */
SDL_BLENDMODE_MUL = 0x00000008, /**< color multiply
dstRGB = (srcRGB * dstRGB) + (dstRGB * (1-srcA))
dstA = (srcA * dstA) + (dstA * (1-srcA)) */
SDL_BLENDMODE_INVALID = 0x7FFFFFFF
/* Additional custom blend modes can be returned by SDL_ComposeCustomBlendMode() */
} SDL_BlendMode;
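/**
 * Example (illustrative sketch, not part of the original header): selecting
 * standard alpha blending for subsequent draw calls; `renderer` is assumed
 * to have been created elsewhere with SDL_CreateRenderer().
 *
 * ```c
 * SDL_SetRenderDrawBlendMode(renderer, SDL_BLENDMODE_BLEND);
 * SDL_SetRenderDrawColor(renderer, 255, 0, 0, 128); // half-transparent red
 * SDL_RenderFillRect(renderer, NULL);               // blends over the target
 * ```
 */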
/**
* \brief The blend operation used when combining source and destination pixel components
*/
typedef enum
{
SDL_BLENDOPERATION_ADD = 0x1, /**< dst + src: supported by all renderers */
    SDL_BLENDOPERATION_SUBTRACT = 0x2, /**< dst - src: supported by D3D9, D3D11, OpenGL, OpenGLES */
    SDL_BLENDOPERATION_REV_SUBTRACT = 0x3, /**< src - dst: supported by D3D9, D3D11, OpenGL, OpenGLES */
    SDL_BLENDOPERATION_MINIMUM = 0x4, /**< min(dst, src): supported by D3D9, D3D11 */
    SDL_BLENDOPERATION_MAXIMUM = 0x5 /**< max(dst, src): supported by D3D9, D3D11 */
} SDL_BlendOperation;
/**
* \brief The normalized factor used to multiply pixel components
*/
typedef enum
{
SDL_BLENDFACTOR_ZERO = 0x1, /**< 0, 0, 0, 0 */
SDL_BLENDFACTOR_ONE = 0x2, /**< 1, 1, 1, 1 */
SDL_BLENDFACTOR_SRC_COLOR = 0x3, /**< srcR, srcG, srcB, srcA */
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4, /**< 1-srcR, 1-srcG, 1-srcB, 1-srcA */
SDL_BLENDFACTOR_SRC_ALPHA = 0x5, /**< srcA, srcA, srcA, srcA */
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6, /**< 1-srcA, 1-srcA, 1-srcA, 1-srcA */
SDL_BLENDFACTOR_DST_COLOR = 0x7, /**< dstR, dstG, dstB, dstA */
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8, /**< 1-dstR, 1-dstG, 1-dstB, 1-dstA */
SDL_BLENDFACTOR_DST_ALPHA = 0x9, /**< dstA, dstA, dstA, dstA */
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA /**< 1-dstA, 1-dstA, 1-dstA, 1-dstA */
} SDL_BlendFactor;
/**
* Compose a custom blend mode for renderers.
*
* The functions SDL_SetRenderDrawBlendMode and SDL_SetTextureBlendMode accept
* the SDL_BlendMode returned by this function if the renderer supports it.
*
* A blend mode controls how the pixels from a drawing operation (source) get
* combined with the pixels from the render target (destination). First, the
* components of the source and destination pixels get multiplied with their
* blend factors. Then, the blend operation takes the two products and
* calculates the result that will get stored in the render target.
*
* Expressed in pseudocode, it would look like this:
*
* ```c
* dstRGB = colorOperation(srcRGB * srcColorFactor, dstRGB * dstColorFactor);
* dstA = alphaOperation(srcA * srcAlphaFactor, dstA * dstAlphaFactor);
* ```
*
* Where the functions `colorOperation(src, dst)` and `alphaOperation(src,
* dst)` can return one of the following:
*
* - `src + dst`
* - `src - dst`
* - `dst - src`
* - `min(src, dst)`
* - `max(src, dst)`
*
* The red, green, and blue components are always multiplied with the first,
* second, and third components of the SDL_BlendFactor, respectively. The
* fourth component is not used.
*
* The alpha component is always multiplied with the fourth component of the
* SDL_BlendFactor. The other components are not used in the alpha
* calculation.
*
* Support for these blend modes varies for each renderer. To check if a
* specific SDL_BlendMode is supported, create a renderer and pass it to
* either SDL_SetRenderDrawBlendMode or SDL_SetTextureBlendMode. They will
* return with an error if the blend mode is not supported.
*
* This list describes the support of custom blend modes for each renderer in
 * SDL 2.0.6. All renderers support the four basic blend modes
 * (SDL_BLENDMODE_NONE, SDL_BLENDMODE_BLEND, SDL_BLENDMODE_ADD, and
 * SDL_BLENDMODE_MOD) listed in the SDL_BlendMode enumeration.
*
* - **direct3d**: Supports all operations with all factors. However, some
* factors produce unexpected results with `SDL_BLENDOPERATION_MINIMUM` and
* `SDL_BLENDOPERATION_MAXIMUM`.
* - **direct3d11**: Same as Direct3D 9.
* - **opengl**: Supports the `SDL_BLENDOPERATION_ADD` operation with all
* factors. OpenGL versions 1.1, 1.2, and 1.3 do not work correctly with SDL
* 2.0.6.
* - **opengles**: Supports the `SDL_BLENDOPERATION_ADD` operation with all
* factors. Color and alpha factors need to be the same. OpenGL ES 1
* implementation specific: May also support `SDL_BLENDOPERATION_SUBTRACT`
* and `SDL_BLENDOPERATION_REV_SUBTRACT`. May support color and alpha
* operations being different from each other. May support color and alpha
* factors being different from each other.
* - **opengles2**: Supports the `SDL_BLENDOPERATION_ADD`,
* `SDL_BLENDOPERATION_SUBTRACT`, `SDL_BLENDOPERATION_REV_SUBTRACT`
* operations with all factors.
* - **psp**: No custom blend mode support.
* - **software**: No custom blend mode support.
*
* Some renderers do not provide an alpha component for the default render
* target. The `SDL_BLENDFACTOR_DST_ALPHA` and
* `SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA` factors do not have an effect in this
* case.
*
* \param srcColorFactor the SDL_BlendFactor applied to the red, green, and
* blue components of the source pixels
* \param dstColorFactor the SDL_BlendFactor applied to the red, green, and
* blue components of the destination pixels
* \param colorOperation the SDL_BlendOperation used to combine the red,
* green, and blue components of the source and
* destination pixels
* \param srcAlphaFactor the SDL_BlendFactor applied to the alpha component of
* the source pixels
* \param dstAlphaFactor the SDL_BlendFactor applied to the alpha component of
* the destination pixels
* \param alphaOperation the SDL_BlendOperation used to combine the alpha
* component of the source and destination pixels
* \returns an SDL_BlendMode that represents the chosen factors and
* operations.
*
* \since This function is available since SDL 2.0.6.
*
* \sa SDL_SetRenderDrawBlendMode
* \sa SDL_GetRenderDrawBlendMode
* \sa SDL_SetTextureBlendMode
* \sa SDL_GetTextureBlendMode
*/
extern DECLSPEC SDL_BlendMode SDLCALL SDL_ComposeCustomBlendMode(SDL_BlendFactor srcColorFactor,
SDL_BlendFactor dstColorFactor,
SDL_BlendOperation colorOperation,
SDL_BlendFactor srcAlphaFactor,
SDL_BlendFactor dstAlphaFactor,
SDL_BlendOperation alphaOperation);
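/**
 * Example (illustrative sketch, not part of the original header): composing a
 * custom mode equivalent to SDL_BLENDMODE_ADD, per the formulas documented in
 * the SDL_BlendMode enumeration, and applying it to a texture; `texture` is
 * assumed to have been created elsewhere.
 *
 * ```c
 * SDL_BlendMode mode = SDL_ComposeCustomBlendMode(
 *     SDL_BLENDFACTOR_SRC_ALPHA, SDL_BLENDFACTOR_ONE, SDL_BLENDOPERATION_ADD,
 *     SDL_BLENDFACTOR_ZERO, SDL_BLENDFACTOR_ONE, SDL_BLENDOPERATION_ADD);
 * if (SDL_SetTextureBlendMode(texture, mode) != 0) {
 *     SDL_Log("blend mode not supported: %s", SDL_GetError());
 * }
 * ```
 */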
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_blendmode_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_filesystem.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_filesystem.h
*
* \brief Include file for filesystem SDL API functions
*/
#ifndef SDL_filesystem_h_
#define SDL_filesystem_h_
#include "SDL_stdinc.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/**
* Get the directory where the application was run from.
*
* This is not necessarily a fast call, so you should call this once near
* startup and save the string if you need it.
*
* **Mac OS X and iOS Specific Functionality**: If the application is in a
* ".app" bundle, this function returns the Resource directory (e.g.
* MyApp.app/Contents/Resources/). This behaviour can be overridden by adding
* a property to the Info.plist file. Adding a string key with the name
* SDL_FILESYSTEM_BASE_DIR_TYPE with a supported value will change the
* behaviour.
*
* Supported values for the SDL_FILESYSTEM_BASE_DIR_TYPE property (Given an
* application in /Applications/SDLApp/MyApp.app):
*
* - `resource`: bundle resource directory (the default). For example:
* `/Applications/SDLApp/MyApp.app/Contents/Resources`
* - `bundle`: the Bundle directory. For example:
* `/Applications/SDLApp/MyApp.app/`
* - `parent`: the containing directory of the bundle. For example:
* `/Applications/SDLApp/`
*
 * **Nintendo 3DS Specific Functionality**: This function returns the "romfs"
 * directory of the application, as it is uncommon to store resources outside
 * the executable. As such, it is not a writable directory.
*
* The returned path is guaranteed to end with a path separator ('\' on
* Windows, '/' on most other platforms).
*
* The pointer returned is owned by the caller. Please call SDL_free() on the
* pointer when done with it.
*
 * \returns an absolute path in UTF-8 encoding to the application data
 *          directory. NULL will be returned on error or when the platform
 *          doesn't implement this functionality; call SDL_GetError() for more
 *          information.
*
* \since This function is available since SDL 2.0.1.
*
* \sa SDL_GetPrefPath
*/
extern DECLSPEC char *SDLCALL SDL_GetBasePath(void);
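/* Usage sketch: call once near startup and cache the result, per the note
 * above; a minimal example assuming SDL is already initialized.
 *
 *   char *base = SDL_GetBasePath();
 *   if (base) {
 *       SDL_Log("running from: %s", base);  // ends with a path separator
 *       SDL_free(base);                     // caller owns the pointer
 *   } else {
 *       SDL_Log("SDL_GetBasePath failed: %s", SDL_GetError());
 *   }
 */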
/**
* Get the user-and-app-specific path where files can be written.
*
* Get the "pref dir". This is meant to be where users can write personal
* files (preferences and save games, etc) that are specific to your
* application. This directory is unique per user, per application.
*
* This function will decide the appropriate location in the native
* filesystem, create the directory if necessary, and return a string of the
* absolute path to the directory in UTF-8 encoding.
*
* On Windows, the string might look like:
*
* `C:\\Users\\bob\\AppData\\Roaming\\My Company\\My Program Name\\`
*
* On Linux, the string might look like:
*
* `/home/bob/.local/share/My Program Name/`
*
* On Mac OS X, the string might look like:
*
* `/Users/bob/Library/Application Support/My Program Name/`
*
 * You should assume the path returned by this function is the only safe place
 * to write files (and that SDL_GetBasePath(), while it might be writable or
 * even be the parent of the returned path, isn't where you should be writing
 * things).
*
* Both the org and app strings may become part of a directory name, so please
* follow these rules:
*
* - Try to use the same org string (_including case-sensitivity_) for all
* your applications that use this function.
* - Always use a unique app string for each one, and make sure it never
* changes for an app once you've decided on it.
* - Unicode characters are legal, as long as it's UTF-8 encoded, but...
* - ...only use letters, numbers, and spaces. Avoid punctuation like "Game
* Name 2: Bad Guy's Revenge!" ... "Game Name 2" is sufficient.
*
* The returned path is guaranteed to end with a path separator ('\' on
* Windows, '/' on most other platforms).
*
* The pointer returned is owned by the caller. Please call SDL_free() on the
* pointer when done with it.
*
* \param org the name of your organization
* \param app the name of your application
 * \returns a UTF-8 string of the user directory in platform-dependent
 *          notation. NULL if there's a problem (creating the directory
 *          failed, etc.).
*
* \since This function is available since SDL 2.0.1.
*
* \sa SDL_GetBasePath
*/
extern DECLSPEC char *SDLCALL SDL_GetPrefPath(const char *org, const char *app);
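/* Usage sketch: "My Company" and "My Program Name" are hypothetical org/app
 * strings, following the naming rules above.
 *
 *   char *pref = SDL_GetPrefPath("My Company", "My Program Name");
 *   if (pref) {
 *       // The only safe place for saves/config; ends with a path separator.
 *       SDL_Log("writing saves under: %s", pref);
 *       SDL_free(pref);  // caller owns the pointer
 *   }
 */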
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_filesystem_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_keyboard.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_keyboard.h
*
* Include file for SDL keyboard event handling
*/
#ifndef SDL_keyboard_h_
#define SDL_keyboard_h_
#include "SDL_stdinc.h"
#include "SDL_error.h"
#include "SDL_keycode.h"
#include "SDL_video.h"
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief The SDL keysym structure, used in key events.
*
* \note If you are looking for translated character input, see the ::SDL_TEXTINPUT event.
*/
typedef struct SDL_Keysym
{
SDL_Scancode scancode; /**< SDL physical key code - see ::SDL_Scancode for details */
SDL_Keycode sym; /**< SDL virtual key code - see ::SDL_Keycode for details */
Uint16 mod; /**< current key modifiers */
Uint32 unused;
} SDL_Keysym;
/* Function prototypes */
/**
* Query the window which currently has keyboard focus.
*
* \returns the window with keyboard focus.
*
* \since This function is available since SDL 2.0.0.
*/
extern DECLSPEC SDL_Window * SDLCALL SDL_GetKeyboardFocus(void);
/**
* Get a snapshot of the current state of the keyboard.
*
* The pointer returned is a pointer to an internal SDL array. It will be
* valid for the whole lifetime of the application and should not be freed by
* the caller.
*
 * An array element with a value of 1 means that the key is pressed and a value
* of 0 means that it is not. Indexes into this array are obtained by using
* SDL_Scancode values.
*
* Use SDL_PumpEvents() to update the state array.
*
* This function gives you the current state after all events have been
* processed, so if a key or button has been pressed and released before you
* process events, then the pressed state will never show up in the
* SDL_GetKeyboardState() calls.
*
* Note: This function doesn't take into account whether shift has been
* pressed or not.
*
* \param numkeys if non-NULL, receives the length of the returned array
* \returns a pointer to an array of key states.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_PumpEvents
* \sa SDL_ResetKeyboard
*/
extern DECLSPEC const Uint8 *SDLCALL SDL_GetKeyboardState(int *numkeys);
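/* Usage sketch: pump events first so the snapshot is current, then index the
 * array with SDL_Scancode values; a minimal example, error handling omitted.
 *
 *   SDL_PumpEvents();
 *   const Uint8 *state = SDL_GetKeyboardState(NULL);
 *   if (state[SDL_SCANCODE_LEFT]) {
 *       // the left arrow key is currently held down
 *   }
 */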
/**
 * Clear the state of the keyboard.
*
* This function will generate key up events for all pressed keys.
*
* \since This function is available since SDL 2.24.0.
*
* \sa SDL_GetKeyboardState
*/
extern DECLSPEC void SDLCALL SDL_ResetKeyboard(void);
/**
* Get the current key modifier state for the keyboard.
*
* \returns an OR'd combination of the modifier keys for the keyboard. See
* SDL_Keymod for details.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyboardState
* \sa SDL_SetModState
*/
extern DECLSPEC SDL_Keymod SDLCALL SDL_GetModState(void);
/**
* Set the current key modifier state for the keyboard.
*
* The inverse of SDL_GetModState(), SDL_SetModState() allows you to impose
* modifier key states on your application. Simply pass your desired modifier
 * states into `modstate`. This value may be a bitwise OR'd combination of
* SDL_Keymod values.
*
* This does not change the keyboard state, only the key modifier flags that
* SDL reports.
*
* \param modstate the desired SDL_Keymod for the keyboard
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetModState
*/
extern DECLSPEC void SDLCALL SDL_SetModState(SDL_Keymod modstate);
/**
* Get the key code corresponding to the given scancode according to the
* current keyboard layout.
*
* See SDL_Keycode for details.
*
* \param scancode the desired SDL_Scancode to query
* \returns the SDL_Keycode that corresponds to the given SDL_Scancode.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyName
* \sa SDL_GetScancodeFromKey
*/
extern DECLSPEC SDL_Keycode SDLCALL SDL_GetKeyFromScancode(SDL_Scancode scancode);
/**
* Get the scancode corresponding to the given key code according to the
* current keyboard layout.
*
* See SDL_Scancode for details.
*
* \param key the desired SDL_Keycode to query
* \returns the SDL_Scancode that corresponds to the given SDL_Keycode.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyFromScancode
* \sa SDL_GetScancodeName
*/
extern DECLSPEC SDL_Scancode SDLCALL SDL_GetScancodeFromKey(SDL_Keycode key);
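/* Usage sketch: round-tripping between the layout-dependent keycode and the
 * physical scancode; a minimal example.
 *
 *   SDL_Keycode key = SDL_GetKeyFromScancode(SDL_SCANCODE_A);
 *   SDL_Scancode sc = SDL_GetScancodeFromKey(SDLK_a);
 *   // On a QWERTY layout both map to the same physical key; on other
 *   // layouts they may differ.
 */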
/**
* Get a human-readable name for a scancode.
*
* See SDL_Scancode for details.
*
* **Warning**: The returned name is by design not stable across platforms,
* e.g. the name for `SDL_SCANCODE_LGUI` is "Left GUI" under Linux but "Left
* Windows" under Microsoft Windows, and some scancodes like
* `SDL_SCANCODE_NONUSBACKSLASH` don't have any name at all. There are even
* scancodes that share names, e.g. `SDL_SCANCODE_RETURN` and
* `SDL_SCANCODE_RETURN2` (both called "Return"). This function is therefore
* unsuitable for creating a stable cross-platform two-way mapping between
* strings and scancodes.
*
* \param scancode the desired SDL_Scancode to query
* \returns a pointer to the name for the scancode. If the scancode doesn't
* have a name this function returns an empty string ("").
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetScancodeFromKey
* \sa SDL_GetScancodeFromName
*/
extern DECLSPEC const char *SDLCALL SDL_GetScancodeName(SDL_Scancode scancode);
/**
* Get a scancode from a human-readable name.
*
* \param name the human-readable scancode name
* \returns the SDL_Scancode, or `SDL_SCANCODE_UNKNOWN` if the name wasn't
* recognized; call SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyFromName
* \sa SDL_GetScancodeFromKey
* \sa SDL_GetScancodeName
*/
extern DECLSPEC SDL_Scancode SDLCALL SDL_GetScancodeFromName(const char *name);
/**
* Get a human-readable name for a key.
*
* See SDL_Scancode and SDL_Keycode for details.
*
* \param key the desired SDL_Keycode to query
* \returns a pointer to a UTF-8 string that stays valid at least until the
* next call to this function. If you need it around any longer, you
* must copy it. If the key doesn't have a name, this function
* returns an empty string ("").
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyFromName
* \sa SDL_GetKeyFromScancode
* \sa SDL_GetScancodeFromKey
*/
extern DECLSPEC const char *SDLCALL SDL_GetKeyName(SDL_Keycode key);
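/* Usage sketch: naming keys from key events; `event` is assumed to come from
 * an SDL_PollEvent() loop.
 *
 *   if (event.type == SDL_KEYDOWN) {
 *       SDL_Log("pressed: %s", SDL_GetKeyName(event.key.keysym.sym));
 *   }
 */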
/**
* Get a key code from a human-readable name.
*
* \param name the human-readable key name
* \returns key code, or `SDLK_UNKNOWN` if the name wasn't recognized; call
* SDL_GetError() for more information.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_GetKeyFromScancode
* \sa SDL_GetKeyName
* \sa SDL_GetScancodeFromName
*/
extern DECLSPEC SDL_Keycode SDLCALL SDL_GetKeyFromName(const char *name);
/**
* Start accepting Unicode text input events.
*
* This function will start accepting Unicode text input events in the focused
* SDL window, and start emitting SDL_TextInputEvent (SDL_TEXTINPUT) and
 * SDL_TextEditingEvent (SDL_TEXTEDITING) events. Please use this function
 * paired with SDL_StopTextInput().
*
* On some platforms using this function activates the screen keyboard.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_SetTextInputRect
* \sa SDL_StopTextInput
*/
extern DECLSPEC void SDLCALL SDL_StartTextInput(void);
/**
* Check whether or not Unicode text input events are enabled.
*
* \returns SDL_TRUE if text input events are enabled else SDL_FALSE.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_StartTextInput
*/
extern DECLSPEC SDL_bool SDLCALL SDL_IsTextInputActive(void);
/**
* Stop receiving any text input events.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_StartTextInput
*/
extern DECLSPEC void SDLCALL SDL_StopTextInput(void);
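/* Usage sketch: a minimal text-input loop; `buf` is a hypothetical char
 * array collecting the typed UTF-8 text.
 *
 *   SDL_StartTextInput();
 *   SDL_Event e;
 *   while (SDL_PollEvent(&e)) {
 *       if (e.type == SDL_TEXTINPUT) {
 *           SDL_strlcat(buf, e.text.text, sizeof(buf));  // append UTF-8
 *       }
 *   }
 *   SDL_StopTextInput();  // e.g. when the text field loses focus
 */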
/**
* Dismiss the composition window/IME without disabling the subsystem.
*
* \since This function is available since SDL 2.0.22.
*
* \sa SDL_StartTextInput
* \sa SDL_StopTextInput
*/
extern DECLSPEC void SDLCALL SDL_ClearComposition(void);
/**
 * Check whether an IME Composite or Candidate window is currently shown.
*
* \since This function is available since SDL 2.0.22.
*/
extern DECLSPEC SDL_bool SDLCALL SDL_IsTextInputShown(void);
/**
* Set the rectangle used to type Unicode text inputs.
*
* To start text input in a given location, this function is intended to be
* called before SDL_StartTextInput, although some platforms support moving
* the rectangle even while text input (and a composition) is active.
*
* Note: If you want to use the system native IME window, try setting hint
* **SDL_HINT_IME_SHOW_UI** to **1**, otherwise this function won't give you
* any feedback.
*
* \param rect the SDL_Rect structure representing the rectangle to receive
* text (ignored if NULL)
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_StartTextInput
*/
extern DECLSPEC void SDLCALL SDL_SetTextInputRect(const SDL_Rect *rect);
/**
* Check whether the platform has screen keyboard support.
*
* \returns SDL_TRUE if the platform has some screen keyboard support or
* SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_StartTextInput
* \sa SDL_IsScreenKeyboardShown
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasScreenKeyboardSupport(void);
/**
* Check whether the screen keyboard is shown for given window.
*
* \param window the window for which screen keyboard should be queried
* \returns SDL_TRUE if screen keyboard is shown or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_HasScreenKeyboardSupport
*/
extern DECLSPEC SDL_bool SDLCALL SDL_IsScreenKeyboardShown(SDL_Window *window);
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_keyboard_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/VisualizedSamples/VisualMandlebrot/ThirdParty/include/SDL2/SDL_cpuinfo.h | /*
Simple DirectMedia Layer
Copyright (C) 1997-2022 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/**
* \file SDL_cpuinfo.h
*
* CPU feature detection for SDL.
*/
#ifndef SDL_cpuinfo_h_
#define SDL_cpuinfo_h_
#include "SDL_stdinc.h"
/* Need to do this here because intrin.h has C++ code in it */
/* Visual Studio 2005 has a bug where intrin.h conflicts with winnt.h */
#if defined(_MSC_VER) && (_MSC_VER >= 1500) && (defined(_M_IX86) || defined(_M_X64))
#ifdef __clang__
/* As of Clang 11, '_m_prefetchw' is conflicting with the winnt.h's version,
so we define the needed '_m_prefetch' here as a pseudo-header, until the issue is fixed. */
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetch(void *__P)
{
__builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
}
#endif /* __PRFCHWINTRIN_H */
#endif /* __clang__ */
#include <intrin.h>
#ifndef _WIN64
#ifndef __MMX__
#define __MMX__
#endif
#ifndef __3dNOW__
#define __3dNOW__
#endif
#endif
#ifndef __SSE__
#define __SSE__
#endif
#ifndef __SSE2__
#define __SSE2__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#elif defined(__MINGW64_VERSION_MAJOR)
#include <intrin.h>
#if !defined(SDL_DISABLE_ARM_NEON_H) && defined(__ARM_NEON)
# include <arm_neon.h>
#endif
#else
/* altivec.h redefining bool causes a number of problems, see bugs 3993 and 4392, so you need to explicitly define SDL_ENABLE_ALTIVEC_H to have it included. */
#if defined(HAVE_ALTIVEC_H) && defined(__ALTIVEC__) && !defined(__APPLE_ALTIVEC__) && defined(SDL_ENABLE_ALTIVEC_H)
#include <altivec.h>
#endif
#if !defined(SDL_DISABLE_ARM_NEON_H)
# if defined(__ARM_NEON)
# include <arm_neon.h>
# elif defined(__WINDOWS__) || defined(__WINRT__) || defined(__GDK__)
/* Visual Studio doesn't define __ARM_ARCH, but _M_ARM (if set, always 7), and _M_ARM64 (if set, always 1). */
# if defined(_M_ARM)
# include <armintr.h>
# include <arm_neon.h>
# define __ARM_NEON 1 /* Set __ARM_NEON so that it can be used elsewhere, at compile time */
# endif
# if defined (_M_ARM64)
# include <arm64intr.h>
# include <arm64_neon.h>
# define __ARM_NEON 1 /* Set __ARM_NEON so that it can be used elsewhere, at compile time */
# define __ARM_ARCH 8
# endif
# endif
#endif
#endif /* compiler version */
#if defined(__3dNOW__) && !defined(SDL_DISABLE_MM3DNOW_H)
#include <mm3dnow.h>
#endif
#if defined(__loongarch_sx) && !defined(SDL_DISABLE_LSX_H)
#include <lsxintrin.h>
#define __LSX__
#endif
#if defined(__loongarch_asx) && !defined(SDL_DISABLE_LASX_H)
#include <lasxintrin.h>
#define __LASX__
#endif
#if defined(HAVE_IMMINTRIN_H) && !defined(SDL_DISABLE_IMMINTRIN_H)
#include <immintrin.h>
#else
#if defined(__MMX__) && !defined(SDL_DISABLE_MMINTRIN_H)
#include <mmintrin.h>
#endif
#if defined(__SSE__) && !defined(SDL_DISABLE_XMMINTRIN_H)
#include <xmmintrin.h>
#endif
#if defined(__SSE2__) && !defined(SDL_DISABLE_EMMINTRIN_H)
#include <emmintrin.h>
#endif
#if defined(__SSE3__) && !defined(SDL_DISABLE_PMMINTRIN_H)
#include <pmmintrin.h>
#endif
#endif /* HAVE_IMMINTRIN_H */
#include "begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/* This is a guess for the cacheline size used for padding.
* Most x86 processors have a 64 byte cache line.
* The 64-bit PowerPC processors have a 128 byte cache line.
* We'll use the larger value to be generally safe.
*/
#define SDL_CACHELINE_SIZE 128
/**
* Get the number of CPU cores available.
*
* \returns the total number of logical CPU cores. On CPUs that include
* technologies such as hyperthreading, the number of logical cores
* may be more than the number of physical cores.
*
* \since This function is available since SDL 2.0.0.
*/
extern DECLSPEC int SDLCALL SDL_GetCPUCount(void);
/**
* Determine the L1 cache line size of the CPU.
*
* This is useful for determining multi-threaded structure padding or SIMD
* prefetch sizes.
*
* \returns the L1 cache line size of the CPU, in bytes.
*
* \since This function is available since SDL 2.0.0.
*/
extern DECLSPEC int SDLCALL SDL_GetCPUCacheLineSize(void);
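/* Usage sketch: sizing a worker pool and reporting basic machine facts;
 * a minimal example (SDL_GetSystemRAM is declared further below).
 *
 *   int workers = SDL_GetCPUCount();          // logical cores
 *   int line    = SDL_GetCPUCacheLineSize();  // L1 line size in bytes
 *   SDL_Log("%d logical cores, %d-byte cache lines, %d MiB RAM",
 *           workers, line, SDL_GetSystemRAM());
 */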
/**
* Determine whether the CPU has the RDTSC instruction.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has the RDTSC instruction or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasRDTSC(void);
/**
* Determine whether the CPU has AltiVec features.
*
* This always returns false on CPUs that aren't using PowerPC instruction
* sets.
*
* \returns SDL_TRUE if the CPU has AltiVec features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasAltiVec(void);
/**
* Determine whether the CPU has MMX features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has MMX features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasMMX(void);
/**
* Determine whether the CPU has 3DNow! features.
*
* This always returns false on CPUs that aren't using AMD instruction sets.
*
* \returns SDL_TRUE if the CPU has 3DNow! features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_Has3DNow(void);
/**
* Determine whether the CPU has SSE features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has SSE features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasSSE(void);
/**
* Determine whether the CPU has SSE2 features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has SSE2 features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasSSE2(void);
/**
* Determine whether the CPU has SSE3 features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has SSE3 features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasSSE3(void);
/**
* Determine whether the CPU has SSE4.1 features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has SSE4.1 features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasSSE41(void);
/**
* Determine whether the CPU has SSE4.2 features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has SSE4.2 features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.0.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasSSE42(void);
/**
* Determine whether the CPU has AVX features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has AVX features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.2.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX2
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasAVX(void);
/**
* Determine whether the CPU has AVX2 features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has AVX2 features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.4.
*
* \sa SDL_Has3DNow
* \sa SDL_HasAltiVec
* \sa SDL_HasAVX
* \sa SDL_HasMMX
* \sa SDL_HasRDTSC
* \sa SDL_HasSSE
* \sa SDL_HasSSE2
* \sa SDL_HasSSE3
* \sa SDL_HasSSE41
* \sa SDL_HasSSE42
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasAVX2(void);
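/* Usage sketch: runtime dispatch between vector and scalar code paths;
 * `sum_avx2`, `sum_sse2`, and `sum_scalar` are hypothetical functions.
 *
 *   if (SDL_HasAVX2()) {
 *       total = sum_avx2(data, n);    // 32-byte vectors
 *   } else if (SDL_HasSSE2()) {
 *       total = sum_sse2(data, n);    // 16-byte vectors
 *   } else {
 *       total = sum_scalar(data, n);  // portable fallback
 *   }
 */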
/**
* Determine whether the CPU has AVX-512F (foundation) features.
*
* This always returns false on CPUs that aren't using Intel instruction sets.
*
* \returns SDL_TRUE if the CPU has AVX-512F features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.9.
*
* \sa SDL_HasAVX
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasAVX512F(void);
/**
* Determine whether the CPU has ARM SIMD (ARMv6) features.
*
 * This is different from ARM NEON, which is a separate instruction set.
*
* This always returns false on CPUs that aren't using ARM instruction sets.
*
* \returns SDL_TRUE if the CPU has ARM SIMD features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.12.
*
* \sa SDL_HasNEON
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasARMSIMD(void);
/**
* Determine whether the CPU has NEON (ARM SIMD) features.
*
* This always returns false on CPUs that aren't using ARM instruction sets.
*
* \returns SDL_TRUE if the CPU has ARM NEON features or SDL_FALSE if not.
*
* \since This function is available since SDL 2.0.6.
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasNEON(void);
/**
* Determine whether the CPU has LSX (LOONGARCH SIMD) features.
*
* This always returns false on CPUs that aren't using LOONGARCH instruction
* sets.
*
* \returns SDL_TRUE if the CPU has LOONGARCH LSX features or SDL_FALSE if
* not.
*
* \since This function is available since SDL 2.24.0.
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasLSX(void);
/**
* Determine whether the CPU has LASX (LOONGARCH SIMD) features.
*
* This always returns false on CPUs that aren't using LOONGARCH instruction
* sets.
*
* \returns SDL_TRUE if the CPU has LOONGARCH LASX features or SDL_FALSE if
* not.
*
* \since This function is available since SDL 2.24.0.
*/
extern DECLSPEC SDL_bool SDLCALL SDL_HasLASX(void);
/**
* Get the amount of RAM configured in the system.
*
* \returns the amount of RAM configured in the system in MiB.
*
* \since This function is available since SDL 2.0.1.
*/
extern DECLSPEC int SDLCALL SDL_GetSystemRAM(void);
/**
* Report the alignment this system needs for SIMD allocations.
*
* This will return the minimum number of bytes to which a pointer must be
* aligned to be compatible with SIMD instructions on the current machine. For
* example, if the machine supports SSE only, it will return 16, but if it
* supports AVX-512F, it'll return 64 (etc). This only reports values for
* instruction sets SDL knows about, so if your SDL build doesn't have
* SDL_HasAVX512F(), then it might return 16 for the SSE support it sees and
* not 64 for the AVX-512 instructions that exist but SDL doesn't know about.
* Plan accordingly.
*
* \returns the alignment in bytes needed for available, known SIMD
* instructions.
*
* \since This function is available since SDL 2.0.10.
*/
extern DECLSPEC size_t SDLCALL SDL_SIMDGetAlignment(void);
/**
* Allocate memory in a SIMD-friendly way.
*
* This will allocate a block of memory that is suitable for use with SIMD
* instructions. Specifically, it will be properly aligned and padded for the
* system's supported vector instructions.
*
* The memory returned will be padded such that it is safe to read or write an
* incomplete vector at the end of the memory block. This can be useful so you
* don't have to drop back to a scalar fallback at the end of your SIMD
* processing loop to deal with the final elements without overflowing the
* allocated buffer.
*
 * You must free this memory with SDL_SIMDFree(), not free() or SDL_free() or
* delete[], etc.
*
* Note that SDL will only deal with SIMD instruction sets it is aware of; for
* example, SDL 2.0.8 knows that SSE wants 16-byte vectors (SDL_HasSSE()), and
* AVX2 wants 32 bytes (SDL_HasAVX2()), but doesn't know that AVX-512 wants
* 64. To be clear: if you can't decide to use an instruction set with an
* SDL_Has*() function, don't use that instruction set with memory allocated
* through here.
*
 * SDL_SIMDAlloc(0) will return a non-NULL pointer, assuming the system isn't
* out of memory, but you are not allowed to dereference it (because you only
* own zero bytes of that buffer).
*
* \param len The length, in bytes, of the block to allocate. The actual
* allocated block might be larger due to padding, etc.
* \returns a pointer to the newly-allocated block, NULL if out of memory.
*
* \since This function is available since SDL 2.0.10.
*
* \sa SDL_SIMDGetAlignment
* \sa SDL_SIMDRealloc
* \sa SDL_SIMDFree
*/
extern DECLSPEC void * SDLCALL SDL_SIMDAlloc(const size_t len);
/**
 * Reallocate memory obtained from SDL_SIMDAlloc().
*
* It is not valid to use this function on a pointer from anything but
* SDL_SIMDAlloc(). It can't be used on pointers from malloc, realloc,
* SDL_malloc, memalign, new[], etc.
*
 * \param mem The pointer obtained from SDL_SIMDAlloc. This function also
 *            accepts NULL, in which case it behaves like a fresh call to
 *            SDL_SIMDAlloc().
 * \param len The length, in bytes, of the block to allocate. The actual
* allocated block might be larger due to padding, etc. Passing 0
* will return a non-NULL pointer, assuming the system isn't out of
* memory.
* \returns a pointer to the newly-reallocated block, NULL if out of memory.
*
* \since This function is available since SDL 2.0.14.
*
* \sa SDL_SIMDGetAlignment
* \sa SDL_SIMDAlloc
* \sa SDL_SIMDFree
*/
extern DECLSPEC void * SDLCALL SDL_SIMDRealloc(void *mem, const size_t len);
/**
 * Deallocate memory obtained from SDL_SIMDAlloc().
*
* It is not valid to use this function on a pointer from anything but
* SDL_SIMDAlloc() or SDL_SIMDRealloc(). It can't be used on pointers from
* malloc, realloc, SDL_malloc, memalign, new[], etc.
*
* However, SDL_SIMDFree(NULL) is a legal no-op.
*
 * The memory pointed to by `ptr` is no longer valid for access upon return,
 * and may be returned to the system or reused by a future allocation; discard
 * the pointer once this function returns.
*
* \param ptr The pointer, returned from SDL_SIMDAlloc or SDL_SIMDRealloc, to
* deallocate. NULL is a legal no-op.
*
* \since This function is available since SDL 2.0.10.
*
* \sa SDL_SIMDAlloc
* \sa SDL_SIMDRealloc
*/
extern DECLSPEC void SDLCALL SDL_SIMDFree(void *ptr);
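/* Usage sketch: the allocate/grow/free lifecycle for SIMD-friendly memory;
 * a minimal example with error handling abbreviated.
 *
 *   float *buf = (float *) SDL_SIMDAlloc(1024 * sizeof(float));
 *   // ... fill and process `buf` with vector code gated by SDL_Has*() ...
 *   buf = (float *) SDL_SIMDRealloc(buf, 4096 * sizeof(float));
 *   // `buf` is aligned to at least SDL_SIMDGetAlignment() bytes.
 *   SDL_SIMDFree(buf);  // never free(), SDL_free(), or delete[] this
 */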
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "close_code.h"
#endif /* SDL_cpuinfo_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| h |