repo_name | file_path | content | extention
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/db/src/query11/pipe_types.hpp | #ifndef __PIPE_TYPES_H__
#define __PIPE_TYPES_H__
#pragma once
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../db_utils/StreamingData.hpp"
#include "../dbdata.hpp"
using namespace sycl;
//
// A single row of the PARTSUPPLIER table
// with a subset of the columns (needed for this query)
//
class PartSupplierRow {
public:
PartSupplierRow()
: valid(false), partkey(0), suppkey(0), availqty(0), supplycost(0) {}
PartSupplierRow(bool v_valid, DBIdentifier v_partkey, DBIdentifier v_suppkey,
int v_availqty, DBDecimal v_supplycost)
: valid(v_valid),
partkey(v_partkey),
suppkey(v_suppkey),
availqty(v_availqty),
supplycost(v_supplycost) {}
// NOTE: suppkey is not actually this table's primary key, but it is the
// key that MapJoin uses
DBIdentifier PrimaryKey() const { return suppkey; }
bool valid;
DBIdentifier partkey;
DBIdentifier suppkey;
int availqty;
DBDecimal supplycost;
};
//
// A row of the joined SUPPLIER and PARTSUPPLIER tables
//
class SupplierPartSupplierJoined {
public:
SupplierPartSupplierJoined()
: valid(false), partkey(0), availqty(0), supplycost(0), nationkey(0) {}
SupplierPartSupplierJoined(bool v_valid, DBIdentifier v_partkey, int v_availqty,
DBDecimal v_supplycost, unsigned char v_nationkey)
: valid(v_valid),
partkey(v_partkey),
availqty(v_availqty),
supplycost(v_supplycost),
nationkey(v_nationkey) {}
DBIdentifier PrimaryKey() const { return partkey; }
void Join(const unsigned char nation_key, const PartSupplierRow& ps_row) {
partkey = ps_row.partkey;
availqty = ps_row.availqty;
supplycost = ps_row.supplycost;
nationkey = nation_key;
}
bool valid;
DBIdentifier partkey;
int availqty;
DBDecimal supplycost;
unsigned char nationkey;
};
//
// The output data for this kernel (the {partkey, partvalue} pair).
// This is the datatype that is sorted by the FifoSorter.
//
class OutputData {
public:
OutputData() {}
// OutputData() : partkey(0), partvalue(0) {}
OutputData(DBIdentifier v_partkey, DBDecimal v_partvalue)
: partkey(v_partkey), partvalue(v_partvalue) {}
bool operator<(const OutputData& t) const { return partvalue < t.partvalue; }
bool operator>(const OutputData& t) const { return partvalue > t.partvalue; }
bool operator==(const OutputData& t) const {
return partvalue == t.partvalue;
}
bool operator!=(const OutputData& t) const {
return partvalue != t.partvalue;
}
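// NOTE: all comparison operators look only at partvalue, so two OutputData
// objects with different partkeys but equal partvalues compare equal; this
// is sufficient for sorting by partvalue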
DBIdentifier partkey;
DBDecimal partvalue;
};
constexpr int kJoinWinSize = 1;
// pipe types
using PartSupplierRowPipeData =
StreamingData<PartSupplierRow, kJoinWinSize>;
using SupplierPartSupplierJoinedPipeData =
StreamingData<SupplierPartSupplierJoined, kJoinWinSize>;
// pipes
using ProducePartSupplierPipe =
pipe<class ProducePartSupplierPipeClass, PartSupplierRowPipeData>;
using PartSupplierPartsPipe =
pipe<class PartSupplierPartsPipeClass, SupplierPartSupplierJoinedPipeData>;
#endif /* __PIPE_TYPES_H__ */
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/db/src/query1/query1_kernel.hpp | #ifndef __QUERY1_KERNEL_HPP__
#define __QUERY1_KERNEL_HPP__
#pragma once
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../dbdata.hpp"
using namespace sycl;
bool SubmitQuery1(queue& q, Database& dbinfo, DBDate low_date,
std::array<DBDecimal, kQuery1OutSize>& sum_qty,
std::array<DBDecimal, kQuery1OutSize>& sum_base_price,
std::array<DBDecimal, kQuery1OutSize>& sum_disc_price,
std::array<DBDecimal, kQuery1OutSize>& sum_charge,
std::array<DBDecimal, kQuery1OutSize>& avg_qty,
std::array<DBDecimal, kQuery1OutSize>& avg_price,
std::array<DBDecimal, kQuery1OutSize>& avg_discount,
std::array<DBDecimal, kQuery1OutSize>& count,
double& kernel_latency, double& total_latency);
#endif //__QUERY1_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/db/src/query1/query1_kernel.cpp | #include <stdio.h>
#include <chrono>
#include "query1_kernel.hpp"
#include "../db_utils/Accumulator.hpp"
#include "../db_utils/Tuple.hpp"
#include "../db_utils/Unroller.hpp"
using namespace std::chrono;
// how many elements to compute per cycle
#if defined(FPGA_SIMULATOR)
constexpr int kElementsPerCycle = 2;
#else
constexpr int kElementsPerCycle = 12;
#endif
// the kernel name
class Query1;
bool SubmitQuery1(queue& q, Database& dbinfo, DBDate low_date,
std::array<DBDecimal, kQuery1OutSize>& sum_qty,
std::array<DBDecimal, kQuery1OutSize>& sum_base_price,
std::array<DBDecimal, kQuery1OutSize>& sum_disc_price,
std::array<DBDecimal, kQuery1OutSize>& sum_charge,
std::array<DBDecimal, kQuery1OutSize>& avg_qty,
std::array<DBDecimal, kQuery1OutSize>& avg_price,
std::array<DBDecimal, kQuery1OutSize>& avg_discount,
std::array<DBDecimal, kQuery1OutSize>& count,
double& kernel_latency, double& total_latency) {
// create space for input buffers
buffer quantity_buf(dbinfo.l.quantity);
buffer extendedprice_buf(dbinfo.l.extendedprice);
buffer discount_buf(dbinfo.l.discount);
buffer tax_buf(dbinfo.l.tax);
buffer returnflag_buf(dbinfo.l.returnflag);
buffer linestatus_buf(dbinfo.l.linestatus);
buffer shipdate_buf(dbinfo.l.shipdate);
// setup the output buffers
buffer sum_qty_buf(sum_qty);
buffer sum_base_price_buf(sum_base_price);
buffer sum_disc_price_buf(sum_disc_price);
buffer sum_charge_buf(sum_charge);
buffer avg_qty_buf(avg_qty);
buffer avg_price_buf(avg_price);
buffer avg_discount_buf(avg_discount);
buffer count_buf(count);
const int rows = dbinfo.l.rows;
const size_t iters = (rows + kElementsPerCycle - 1) / kElementsPerCycle;
// start timer
high_resolution_clock::time_point host_start = high_resolution_clock::now();
/////////////////////////////////////////////////////////////////////////////
//// Query1 Kernel
auto event = q.submit([&](handler& h) {
// read accessors
accessor quantity_accessor(quantity_buf, h, read_only);
accessor extendedprice_accessor(extendedprice_buf, h, read_only);
accessor discount_accessor(discount_buf, h, read_only);
accessor tax_accessor(tax_buf, h, read_only);
accessor returnflag_accessor(returnflag_buf, h, read_only);
accessor linestatus_accessor(linestatus_buf, h, read_only);
accessor shipdate_accessor(shipdate_buf, h, read_only);
// write accessors
accessor sum_qty_accessor(sum_qty_buf, h, write_only, no_init);
accessor sum_base_price_accessor(sum_base_price_buf, h, write_only, no_init);
accessor sum_disc_price_accessor(sum_disc_price_buf, h, write_only, no_init);
accessor sum_charge_accessor(sum_charge_buf, h, write_only, no_init);
accessor avg_qty_accessor(avg_qty_buf, h, write_only, no_init);
accessor avg_price_accessor(avg_price_buf, h, write_only, no_init);
accessor avg_discount_accessor(avg_discount_buf, h, write_only, no_init);
accessor count_accessor(count_buf, h, write_only, no_init);
h.single_task<Query1>([=]() [[intel::kernel_args_restrict]] {
// local accumulation buffers
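// (each accumulator appears to hold kQuery1OutSize = 6 independent bins,
// one per {returnflag, linestatus} combination, indexed by the
// 'unsigned char' out_idx computed below; the '6' template argument is
// assumed to be that bin count)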
RegisterAccumulator<DBDecimal, 6, unsigned char> sum_qty_local;
RegisterAccumulator<DBDecimal, 6, unsigned char> sum_base_price_local;
RegisterAccumulator<DBDecimal, 6, unsigned char> sum_disc_price_local;
RegisterAccumulator<DBDecimal, 6, unsigned char> sum_charge_local;
RegisterAccumulator<DBDecimal, 6, unsigned char> avg_discount_local;
RegisterAccumulator<DBDecimal, 6, unsigned char> count_local;
// initialize the accumulators
sum_qty_local.Init();
sum_base_price_local.Init();
sum_disc_price_local.Init();
sum_charge_local.Init();
avg_discount_local.Init();
count_local.Init();
// stream each row in the DB (kElementsPerCycle rows at a time)
[[intel::initiation_interval(1)]]
for (size_t r = 0; r < iters; r++) {
// locals
DBDecimal qty[kElementsPerCycle];
DBDecimal extendedprice[kElementsPerCycle];
DBDecimal discount[kElementsPerCycle];
DBDecimal tax[kElementsPerCycle];
DBDecimal disc_price_tmp[kElementsPerCycle];
DBDecimal charge_tmp[kElementsPerCycle];
DBDecimal count_tmp[kElementsPerCycle];
unsigned char out_idx[kElementsPerCycle];
bool row_valid[kElementsPerCycle];
// multiple elements per cycle
UnrolledLoop<0, kElementsPerCycle>([&](auto p) {
// is data in range of the table
// (data size may not be divisible by kElementsPerCycle)
size_t idx = r * kElementsPerCycle + p;
bool in_range = idx < rows;
// get this row's shipdate
DBDate shipdate = shipdate_accessor[idx];
// determine if the row is valid
row_valid[p] = in_range && (shipdate <= low_date);
// read or set values based on the validity of the data
qty[p] = quantity_accessor[idx];
extendedprice[p] = extendedprice_accessor[idx];
discount[p] = discount_accessor[idx];
tax[p] = tax_accessor[idx];
char rf = returnflag_accessor[idx];
char ls = linestatus_accessor[idx];
count_tmp[p] = 1;
// convert returnflag and linestatus into an index
unsigned char rf_idx;
if (rf == 'R') {
rf_idx = 0;
} else if (rf == 'A') {
rf_idx = 1;
} else { // == 'N'
rf_idx = 2;
}
unsigned char ls_idx;
if (ls == 'O') {
ls_idx = 0;
} else { // == 'F'
ls_idx = 1;
}
out_idx[p] = ls_idx * kReturnFlagSize + rf_idx;
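// e.g., assuming kReturnFlagSize == 3 (flags 'R'/'A'/'N' above):
// rf == 'A' (rf_idx = 1) and ls == 'F' (ls_idx = 1) give
// out_idx = 1 * 3 + 1 = 4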
// intermediate calculations
disc_price_tmp[p] = extendedprice[p] * (100 - discount[p]);
charge_tmp[p] =
extendedprice[p] * (100 - discount[p]) * (100 + tax[p]);
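// (discount and tax appear to be stored as scaled integer percentages, so
// disc_price_tmp and charge_tmp carry extra constant factors of 100 and
// 100 * 100 relative to the true decimal values)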
});
// reduction accumulation
UnrolledLoop<0, kElementsPerCycle>([&](auto p) {
sum_qty_local.Accumulate(out_idx[p],
row_valid[p] ? qty[p] : 0);
sum_base_price_local.Accumulate(out_idx[p],
row_valid[p] ? extendedprice[p] : 0);
sum_disc_price_local.Accumulate(out_idx[p],
row_valid[p] ? disc_price_tmp[p] : 0);
sum_charge_local.Accumulate(out_idx[p],
row_valid[p] ? charge_tmp[p] : 0);
count_local.Accumulate(out_idx[p],
row_valid[p] ? count_tmp[p] : 0);
avg_discount_local.Accumulate(out_idx[p],
row_valid[p] ? discount[p] : 0);
});
}
// perform averages and push back to global memory
#pragma unroll
for (size_t i = 0; i < kQuery1OutSize; i++) {
DBDecimal count = count_local.Get(i);
sum_qty_accessor[i] = sum_qty_local.Get(i);
sum_base_price_accessor[i] = sum_base_price_local.Get(i);
sum_disc_price_accessor[i] = sum_disc_price_local.Get(i);
sum_charge_accessor[i] = sum_charge_local.Get(i);
avg_qty_accessor[i] = (count == 0) ? 0 : (sum_qty_local.Get(i) / count);
avg_price_accessor[i] =
(count == 0) ? 0 : (sum_base_price_local.Get(i) / count);
avg_discount_accessor[i] =
(count == 0) ? 0 : (avg_discount_local.Get(i) / count);
count_accessor[i] = count;
}
});
});
/////////////////////////////////////////////////////////////////////////////
// wait for kernel to finish
event.wait();
high_resolution_clock::time_point host_end = high_resolution_clock::now();
duration<double, std::milli> diff = host_end - host_start;
// gather profiling info
auto kernel_start_time =
event.get_profiling_info<info::event_profiling::command_start>();
auto kernel_end_time =
event.get_profiling_info<info::event_profiling::command_end>();
// calculating the kernel execution time in ms
auto kernel_execution_time = (kernel_end_time - kernel_start_time) * 1e-6;
kernel_latency = kernel_execution_time;
total_latency = diff.count();
return true;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/main.cpp | #include <sycl/sycl.hpp>
#include <algorithm>
#include <array>
#include <cstdlib>
#include <fstream>
#include <limits>
#include <numeric>
#include <optional>
#include <sstream>
#include <string>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
#include "constexpr_math.hpp" // included from ../../../include
#include "common/common.hpp"
#include "exception_handler.hpp"
using namespace sycl;
// ensure only one of GZIP and SNAPPY is defined
#if defined(GZIP) and defined(SNAPPY)
static_assert(false, "Only one of GZIP and SNAPPY can be defined!");
#endif
// if neither of GZIP and SNAPPY is defined, default to SNAPPY
#if not defined(GZIP) and not defined(SNAPPY)
#define SNAPPY
#endif
// the number of literals to process at once can be set at compile time
// using the compiler flag -DLITERALS_PER_CYCLE=<literals_per_cycle>
// This is sent to the LZ77 decoder to read multiple elements at once from
// the history buffer.
#if not defined(LITERALS_PER_CYCLE)
// default LITERALS_PER_CYCLE for GZIP
#if defined(GZIP)
#define LITERALS_PER_CYCLE 4
#endif
// default LITERALS_PER_CYCLE for SNAPPY
#if defined(SNAPPY)
#define LITERALS_PER_CYCLE 8
#endif
#endif
constexpr unsigned kLiteralsPerCycle = LITERALS_PER_CYCLE;
static_assert(kLiteralsPerCycle > 0);
static_assert(fpga_tools::IsPow2(kLiteralsPerCycle));
// include files and aliases specific to GZIP and SNAPPY decompression
#if defined(GZIP)
#include "gzip/gzip_decompressor.hpp"
#else
#include "snappy/snappy_data_gen.hpp"
#include "snappy/snappy_decompressor.hpp"
#endif
// aliases and testing functions specific to GZIP and SNAPPY decompression
#if defined(GZIP)
using GzipDecompressorT = GzipDecompressor<kLiteralsPerCycle>;
bool RunGzipTest(sycl::queue& q, GzipDecompressorT decompressor,
const std::string test_dir);
std::string decompressor_name = "GZIP";
#else
using SnappyDecompressorT = SnappyDecompressor<kLiteralsPerCycle>;
bool RunSnappyTest(sycl::queue& q, SnappyDecompressorT decompressor,
const std::string test_dir);
std::string decompressor_name = "SNAPPY";
#endif
// Prints the usage for the executable command line args
void PrintUsage(std::string exe_name) {
std::cerr << "USAGE: \n"
<< exe_name << " <input filename> <output filename> [runs]\n"
<< exe_name << " <test directory>" << std::endl;
}
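// Example invocations (with a hypothetical executable name 'decompress'):
//   ./decompress                    run the default tests in the default dir
//   ./decompress my_test_dir        run the default tests in 'my_test_dir'
//   ./decompress in.gz out.bin 5    decompress 'in.gz' into 'out.bin' 5 times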
int main(int argc, char* argv[]) {
// reading and validating the command line arguments
// if no arguments are given, we will run the default tests for uncompressed,
// statically compressed, and dynamically compressed blocks
// if arguments are given, we will assume the user wants to decompress a
// specific file
#if defined(GZIP)
std::string test_dir = "../data/gzip";
#else
std::string test_dir = "../data/snappy";
#endif
std::string in_filename;
std::string out_filename;
int runs;
bool default_test_mode = false;
if (argc == 1 || argc == 2) {
default_test_mode = true;
} else if (argc > 4) {
PrintUsage(argv[0]);
return 1;
}
if (default_test_mode) {
if (argc > 1) test_dir = argv[1];
} else {
// default the number of runs based on emulation, simulation, or hardware
#if defined(FPGA_EMULATOR)
runs = 2;
#elif defined(FPGA_SIMULATOR)
runs = 1;
#else
runs = 9;
#endif
in_filename = argv[1];
out_filename = argv[2];
if (argc > 3) runs = atoi(argv[3]);
if (runs < 1) {
std::cerr << "ERROR: 'runs' must be greater than 0\n";
std::terminate();
}
}
std::cout << "Using " << decompressor_name << " decompression\n";
std::cout << std::endl;
#if FPGA_SIMULATOR
auto selector = sycl::ext::intel::fpga_simulator_selector_v;
#elif FPGA_HARDWARE
auto selector = sycl::ext::intel::fpga_selector_v;
#else // #if FPGA_EMULATOR
auto selector = sycl::ext::intel::fpga_emulator_selector_v;
#endif
// create the device queue
queue q(selector, fpga_tools::exception_handler);
device device = q.get_device();
std::cout << "Running on device: "
<< device.get_info<info::device::name>().c_str()
<< std::endl;
// create the decompressor based on which decompression version we are using
#if defined(GZIP)
GzipDecompressorT decompressor;
#else
SnappyDecompressorT decompressor;
#endif
// perform the test or single file decompression
bool passed;
if (default_test_mode) {
#if defined(GZIP)
passed = RunGzipTest(q, decompressor, test_dir);
#else
passed = RunSnappyTest(q, decompressor, test_dir);
#endif
} else {
// decompress a specific file specified at the command line
passed = decompressor.DecompressFile(q, in_filename, out_filename, runs,
true, true);
}
if (passed) {
std::cout << "PASSED" << std::endl;
return 0;
} else {
std::cout << "FAILED" << std::endl;
return 1;
}
}
//
// Pretty formatting for printing the result of a test
//
void PrintTestResults(std::string test_name, bool passed) {
if (passed)
std::cout << ">>>>> " << test_name << ": PASSED <<<<<\n";
else
std::cerr << ">>>>> " << test_name << ": FAILED <<<<<\n";
}
#if defined(GZIP)
bool RunGzipTest(sycl::queue& q, GzipDecompressorT decompressor,
const std::string test_dir) {
#ifdef FPGA_SIMULATOR
// the name of the file for the simulator is fixed
std::string small_filename = test_dir + "/small.gz";
std::cout << ">>>>> Small File Test <<<<<" << std::endl;
bool small_test_pass = decompressor.DecompressFile(
q, small_filename, "", 1, false, false);
PrintTestResults("Small File Test", small_test_pass);
std::cout << std::endl;
return small_test_pass;
#else
// the names of the files for the default test are fixed
std::string uncompressed_filename = test_dir + "/uncompressed.gz";
std::string static_compress_filename = test_dir + "/static_compressed.gz";
std::string dynamic_compress_filename = test_dir + "/dynamic_compressed.gz";
std::string tp_test_filename = test_dir + "/tp_test.gz";
std::cout << ">>>>> Uncompressed File Test <<<<<" << std::endl;
bool uncompressed_test_pass = decompressor.DecompressFile(
q, uncompressed_filename, "", 1, false, false);
PrintTestResults("Uncompressed File Test", uncompressed_test_pass);
std::cout << std::endl;
std::cout << ">>>>> Statically Compressed File Test <<<<<" << std::endl;
bool static_test_pass = decompressor.DecompressFile(
q, static_compress_filename, "", 1, false, false);
PrintTestResults("Statically Compressed File Test", static_test_pass);
std::cout << std::endl;
std::cout << ">>>>> Dynamically Compressed File Test <<<<<" << std::endl;
bool dynamic_test_pass = decompressor.DecompressFile(
q, dynamic_compress_filename, "", 1, false, false);
PrintTestResults("Dynamically Compressed File Test", dynamic_test_pass);
std::cout << std::endl;
std::cout << ">>>>> Throughput Test <<<<<" << std::endl;
constexpr int kTPTestRuns = 5;
bool tp_test_pass = decompressor.DecompressFile(q, tp_test_filename, "",
kTPTestRuns, true, false);
PrintTestResults("Throughput Test", tp_test_pass);
std::cout << std::endl;
return uncompressed_test_pass && static_test_pass && dynamic_test_pass &&
tp_test_pass;
#endif
}
#endif
#if defined(SNAPPY)
bool RunSnappyTest(sycl::queue& q, SnappyDecompressorT decompressor,
const std::string test_dir) {
#ifdef FPGA_SIMULATOR
std::cout << ">>>>> Alice In Wonderland Test <<<<<" << std::endl;
std::string alice_in_file = test_dir + "/alice29_small.txt.sz";
auto in_bytes = ReadInputFile(alice_in_file);
auto result = decompressor.DecompressBytes(q, in_bytes, 1, false);
std::string alice_ref_file = test_dir + "/alice29_small_ref.txt";
auto ref_bytes = ReadInputFile(alice_ref_file);
bool alice_test_pass =
(result != std::nullopt) && (result.value() == ref_bytes);
PrintTestResults("Alice In Wonderland Test", alice_test_pass);
std::cout << std::endl;
return alice_test_pass;
#else
std::cout << ">>>>> Alice In Wonderland Test <<<<<" << std::endl;
std::string alice_in_file = test_dir + "/alice29.txt.sz";
auto in_bytes = ReadInputFile(alice_in_file);
auto result = decompressor.DecompressBytes(q, in_bytes, 1, false);
std::string alice_ref_file = test_dir + "/alice29.ref.txt";
auto ref_bytes = ReadInputFile(alice_ref_file);
bool alice_test_pass =
(result != std::nullopt) && (result.value() == ref_bytes);
PrintTestResults("Alice In Wonderland Test", alice_test_pass);
std::cout << std::endl;
std::cout << ">>>>> Only Literal Strings Test <<<<<" << std::endl;
auto test1_bytes = GenerateSnappyCompressedData(333, 3, 0, 0, 3);
auto test1_ret = decompressor.DecompressBytes(q, test1_bytes, 1, false);
bool test1_pass = test1_ret != std::nullopt;
PrintTestResults("Only Literal Strings Test", test1_pass);
std::cout << std::endl;
std::cout << ">>>>> Many Copies Test <<<<<" << std::endl;
auto test2_bytes = GenerateSnappyCompressedData(65535, 1, 64, 13, 9);
auto test2_ret = decompressor.DecompressBytes(q, test2_bytes, 1, false);
bool test2_pass = test2_ret != std::nullopt;
PrintTestResults("Many Copies Test", test2_pass);
std::cout << std::endl;
std::cout << ">>>>> Mixed Literal Strings and Copies Test <<<<<" << std::endl;
auto test3_bytes = GenerateSnappyCompressedData(16065, 7, 13, 5, 3);
auto test3_ret = decompressor.DecompressBytes(q, test3_bytes, 1, false);
bool test3_pass = test3_ret != std::nullopt;
PrintTestResults("Mixed Literal Strings and Copies Test", test3_pass);
std::cout << std::endl;
std::cout << ">>>>> Throughput Test <<<<<" << std::endl;
constexpr int kTPTestRuns = 5;
#ifndef FPGA_EMULATOR
auto test_tp_bytes = GenerateSnappyCompressedData(65536, 2, 0, 0, 128);
#else
auto test_tp_bytes = GenerateSnappyCompressedData(65536, 2, 0, 0, 2);
#endif
auto test_tp_ret =
decompressor.DecompressBytes(q, test_tp_bytes, kTPTestRuns, true);
bool test_tp_pass = test_tp_ret != std::nullopt;
PrintTestResults("Throughput Test", test_tp_pass);
std::cout << std::endl;
return alice_test_pass && test1_pass && test2_pass && test3_pass &&
test_tp_pass;
#endif
}
#endif
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/common/simple_crc32.hpp | #ifndef __SIMPLE_CRC32_HPP__
#define __SIMPLE_CRC32_HPP__
#include <array>
#include <cstddef>
#include <cstdint>
//
// A simple CRC-32 implementation (not optimized for high performance).
// Compute CRC-32 on 'len' elements in 'buf', starting with a CRC of 'init'.
//
// Arguments:
// init: the initial CRC value. This is used to string together multiple
// calls to SimpleCRC32. For the first iteration, use 0.
// buf: a pointer to the data
// len: the number of bytes pointed to by 'buf'
//
unsigned int SimpleCRC32(unsigned init, const void* buf, size_t len) {
// generate the 256-element table
constexpr uint32_t polynomial = 0xEDB88320;
constexpr auto table = [] {
std::array<uint32_t, 256> a{};
for (uint32_t i = 0; i < 256; i++) {
uint32_t c = i;
for (uint32_t j = 0; j < 8; j++) {
if (c & 1) {
c = polynomial ^ (c >> 1);
} else {
c >>= 1;
}
}
a[i] = c;
}
return a;
}();
// compute the CRC-32 for the input data
unsigned c = init ^ 0xFFFFFFFF;
const uint8_t* u = static_cast<const uint8_t*>(buf);
for (size_t i = 0; i < len; i++) {
c = table[(c ^ u[i]) & 0xFF] ^ (c >> 8);
}
return c ^ 0xFFFFFFFF;
}
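// A minimal usage sketch (hypothetical buffers 'chunk_a'/'chunk_b'): the
// 'init' argument lets calls be chained, and the chained result equals a
// single call over the concatenated data:
//   unsigned crc = SimpleCRC32(0, chunk_a, len_a);
//   crc = SimpleCRC32(crc, chunk_b, len_b);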
#endif /* __SIMPLE_CRC32_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/common/lz77_decoder.hpp | #ifndef __LZ77_DECODER_HPP__
#define __LZ77_DECODER_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "common.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "metaprogramming_utils.hpp" // included from ../../../../include
#include "onchip_memory_with_cache.hpp" // included from ../../../../include
#include "tuple.hpp" // included from ../../../../include
#include "unrolled_loop.hpp" // included from ../../../../include
//
// Performs LZ77 decoding for more than 1 element at once.
// Streams in 'LZ77InputData' (see common.hpp) appended with a flag (FlagBundle)
// which contains either a literal from upstream, or a {length, distance} pair.
// Given a literal input, this function simply tracks that literal in a history
// buffer and writes it to the output. For a {length, distance} pair, this
// function reads 'literals_per_cycle' elements from the history buffer per
// cycle and writes them to the output.
//
// Template parameters:
// InPipe: a SYCL pipe that streams in LZ77InputData with a boolean flag that
// indicates whether the input stream is done.
// OutPipe: a SYCL pipe that streams out an array of literals and a
// 'valid_count' that is in the range [0, literals_per_cycle].
// literals_per_cycle: the number of literals to read from the history
// buffer at once. This sets the maximum possible throughput for the
// LZ77 decoder.
// max_distance: the maximum distance for a {length, distance} pair.
// For example, for DEFLATE this is 32K and for snappy this is 64K.
//
template <typename InPipe, typename OutPipe, size_t literals_per_cycle,
size_t max_distance>
void LZ77DecoderMultiElement() {
using OutPipeBundleT = decltype(OutPipe::read());
using OutDataT = decltype(std::declval<OutPipeBundleT>().data);
// we will cyclically partition the history to 'literals_per_cycle' buffers,
// so each buffer gets this many elements
constexpr size_t history_buffer_count = max_distance / literals_per_cycle;
// number of bits to count from 0 to literals_per_cycle-1
constexpr size_t history_buffer_buffer_idx_bits =
fpga_tools::Log2(literals_per_cycle);
// bit mask for counting from 0 to literals_per_cycle-1
constexpr size_t history_buffer_buffer_idx_mask = literals_per_cycle - 1;
// number of bits to count from 0 to history_buffer_count-1
constexpr size_t history_buffer_idx_bits =
fpga_tools::Log2(history_buffer_count);
// bit mask for counting from 0 to history_buffer_count-1
constexpr size_t history_buffer_idx_mask = history_buffer_count - 1;
// the data type used to index from 0 to literals_per_cycle-1 (i.e., pick
// which buffer to use)
using HistBufBufIdxT = ac_uint<history_buffer_buffer_idx_bits>;
// the data type used to index from 0 to history_buffer_count-1 (i.e., after
// picking which buffer, index into that buffer)
using HistBufIdxT = ac_uint<history_buffer_idx_bits>;
// track whether we are reading from the history, and how many more elements
// to read from the history
bool reading_history = false;
bool reading_history_next;
short history_counter;
// which of the 'literals_per_cycle' buffers is the one to write to next
HistBufBufIdxT history_buffer_buffer_idx = 0;
// for each of the 'literals_per_cycle' buffers, where do we write next
[[intel::fpga_register]] HistBufIdxT history_buffer_idx[literals_per_cycle];
// the OnchipMemoryWithCache history buffers cache in-flight writes to the
// history buffer and break loop carried dependencies that are smaller than
// kCacheDepth
constexpr int kCacheDepth = 8;
// the history buffers
fpga_tools::NTuple<fpga_tools::OnchipMemoryWithCache<
unsigned char, history_buffer_count, kCacheDepth + 1>,
literals_per_cycle>
history_buffer;
// these variables are used to read from the history buffer upon request from
// the Huffman decoder kernel
HistBufBufIdxT read_history_buffer_buffer_idx = 0;
[[intel::fpga_register]] // NO-FORMAT: Attribute
HistBufIdxT read_history_buffer_idx[literals_per_cycle];
[[intel::fpga_register]] // NO-FORMAT: Attribute
HistBufBufIdxT read_history_shuffle_idx[literals_per_cycle];
// precompute the function: dist - ((i - dist) % dist)
// which is used for the corner case when the copy distance is less than
// 'literals_per_cycle'
[[intel::fpga_register]] // NO-FORMAT: Attribute
constexpr auto mod_lut = [&] {
constexpr int dim = literals_per_cycle - 1;
std::array<std::array<unsigned char, dim>, dim> ret{};
for (int y = 0; y < dim; y++) {
for (int x = y; x < dim; x++) {
unsigned char dist = y + 1;
unsigned char i = x + 1;
ret[y][x] = dist - ((i - dist) % dist);
}
}
return ret;
}();
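// e.g., with literals_per_cycle = 4 (dim = 3), dist = 2 and i = 3 give
// mod_lut[1][2] = 2 - ((3 - 2) % 2) = 1, i.e., output element 3 is read
// from the buffer 1 position behind the current write buffer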
// initialize the index pointers for each history buffer
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
history_buffer_idx[i] = 0;
}
bool done = false;
// the main processing loop.
// Using the OnchipMemoryWithCache, we are able to break all loop carried
// dependencies with a distance of 'kCacheDepth' and less
while (!done) {
bool data_valid = true;
OutDataT out_data;
if (!reading_history) {
// if we aren't currently reading from the history buffers,
// then read from input pipe
auto pipe_data = InPipe::read(data_valid);
// check if the upstream kernel is done sending us data
done = pipe_data.flag && data_valid;
// grab the literal or the length and distance pair
unsigned short dist = pipe_data.data.distance;
// for the case of literal(s), we will simply write it to the output
// get the specific LZ77InputData type to see how many literals can come
// in the input at once and check that it is at most literals_per_cycle
using InputLZ77DataT = decltype(pipe_data.data);
static_assert(InputLZ77DataT::max_literals <= literals_per_cycle);
#pragma unroll
for (int i = 0; i < InputLZ77DataT::max_literals; i++) {
out_data[i] = pipe_data.data.literal[i];
}
out_data.valid_count = pipe_data.data.valid_count;
// if we get a length distance pair we will read 'pipe_data.data.length'
// bytes starting at an offset of 'dist'
history_counter = pipe_data.data.length;
reading_history = !pipe_data.data.is_literal && data_valid;
reading_history_next = history_counter > literals_per_cycle;
// grab the low Log2(literals_per_cycle) bits of the distance
HistBufBufIdxT dist_small = dist & history_buffer_buffer_idx_mask;
// find which of the history buffers we will read from first
read_history_buffer_buffer_idx =
(history_buffer_buffer_idx - dist_small) &
history_buffer_buffer_idx_mask;
// find the starting read index for each history buffer, and compute the
// shuffle vector for shuffling the data to the output
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
// the buffer index
HistBufBufIdxT buf_idx =
(read_history_buffer_buffer_idx + HistBufBufIdxT(i)) &
history_buffer_buffer_idx_mask;
// compute the starting index for buffer 'buf_idx' (in range
// [0, history_buffer_count))
HistBufIdxT starting_read_idx_for_this_buf =
(history_buffer_idx[buf_idx] - ((dist - i) / literals_per_cycle)) -
1;
if (buf_idx == history_buffer_buffer_idx) {
starting_read_idx_for_this_buf += 1;
}
read_history_buffer_idx[buf_idx] =
starting_read_idx_for_this_buf & history_buffer_idx_mask;
if (dist > i) {
// normal case for the shuffle vector
read_history_shuffle_idx[i] = buf_idx;
} else {
// EDGE CASE!
// this special case happens whenever dist < literals_per_cycle
// and we need to repeat one of the earlier elements
// idx_back = dist_small - ((i - dist_small) % dist_small)
HistBufBufIdxT idx_back = mod_lut[dist_small - 1][i - 1];
read_history_shuffle_idx[i] = (history_buffer_buffer_idx - idx_back) &
history_buffer_buffer_idx_mask;
}
}
}
if (reading_history) {
// grab from each of the history buffers
unsigned char historical_bytes[literals_per_cycle];
fpga_tools::UnrolledLoop<literals_per_cycle>([&](auto i) {
// get the index into this buffer and read from it
auto idx_in_buf = read_history_buffer_idx[i];
historical_bytes[i] = history_buffer.template get<i>().read(idx_in_buf);
});
// shuffle the elements read from the history buffers to the output
// using the shuffle vector computed earlier. Note, the numbers in the
// shuffle vector need not be unique, which happens in the special case
// of dist < literals_per_cycle, described above.
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
out_data[i] = historical_bytes[read_history_shuffle_idx[i]];
}
// we will write out min(history_counter, literals_per_cycle) elements;
// fewer than literals_per_cycle are written when the length of the copy
// is not a multiple of 'literals_per_cycle'
if (history_counter < literals_per_cycle) {
out_data.valid_count = history_counter;
} else {
out_data.valid_count = literals_per_cycle;
}
// update the history read indices for the next iteration (if we are still
// reading from the history buffers)
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
read_history_buffer_idx[i] =
(read_history_buffer_idx[i] + ac_uint<1>(1)) &
history_buffer_idx_mask;
}
// update whether we will still be reading from the history buffers on
// the next iteration of the loop
reading_history = reading_history_next;
reading_history_next = history_counter > literals_per_cycle * 2;
history_counter -= literals_per_cycle;
}
if (!done && data_valid) {
// compute the valid bitmap and shuffle vector for the writes
bool write_bitmap[literals_per_cycle];
[[intel::fpga_register]] HistBufBufIdxT shuffle_vec[literals_per_cycle];
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
HistBufBufIdxT buf_idx =
(history_buffer_buffer_idx + i) & history_buffer_buffer_idx_mask;
write_bitmap[buf_idx] = i < out_data.valid_count;
shuffle_vec[buf_idx] = i;
}
// write to the history buffers
fpga_tools::UnrolledLoop<literals_per_cycle>([&](auto i) {
if (write_bitmap[i]) {
// grab the literal to write out
auto literal_out = out_data[shuffle_vec[i]];
// the index into this history buffer
HistBufIdxT idx_in_buf = history_buffer_idx[i];
// history_buffer.template get<i>()[idx_in_buf] = literal_out;
history_buffer.template get<i>().write(idx_in_buf, literal_out);
// update the history buffer index
history_buffer_idx[i] =
(history_buffer_idx[i] + ac_uint<1>(1)) & history_buffer_idx_mask;
}
});
// update the most recent buffer
history_buffer_buffer_idx =
(history_buffer_buffer_idx + out_data.valid_count) &
history_buffer_buffer_idx_mask;
// write the output to the pipe
OutPipe::write(OutPipeBundleT(out_data));
}
}
OutPipe::write(OutPipeBundleT(true));
}
//
// For performance reasons, we provide a special version of LZ77 for
// literals_per_cycle = 1. See the comments on the LZ77DecoderMultiElement
// function above for information on the template parameters.
//
template <typename InPipe, typename OutPipe, size_t max_distance>
void LZ77DecoderSingleElement() {
using OutPipeBundleT = decltype(OutPipe::read());
using OutDataT = decltype(std::declval<OutPipeBundleT>().data);
constexpr size_t history_buffer_count = max_distance;
constexpr size_t history_buffer_idx_bits =
fpga_tools::Log2(history_buffer_count);
constexpr size_t history_buffer_idx_mask = history_buffer_count - 1;
using HistBufIdxT = ac_uint<history_buffer_idx_bits>;
// track whether we are reading from the history, and how many more elements
// to read from the history
bool reading_history = false;
bool reading_history_next;
short history_counter;
// the history buffers
HistBufIdxT history_buffer_idx = 0, read_history_buffer_idx;
unsigned char history_buffer[history_buffer_count];
// the history buffer cache, used to hold in-flight writes and break loop
// carried dependencies
constexpr int kCacheDepth = 7;
[[intel::fpga_register]] // NO-FORMAT: Attribute
unsigned char history_buffer_cache_val[kCacheDepth + 1];
[[intel::fpga_register]] // NO-FORMAT: Attribute
HistBufIdxT history_buffer_cache_idx[kCacheDepth + 1];
bool done = false;
[[intel::ivdep(kCacheDepth)]] // NO-FORMAT: Attribute
while (!done) {
bool data_valid = true;
OutDataT out_data;
// if we aren't currently reading from the history, read from input pipe
if (!reading_history) {
// if we aren't currently reading from the history buffers,
// then read from input pipe
auto pipe_data = InPipe::read(data_valid);
// check if the upstream kernel is done sending us data
done = pipe_data.flag && data_valid;
// grab the literal or the length and distance pair
unsigned short dist = pipe_data.data.distance;
// for the case of a literal, we will simply write it to the output
out_data[0] = pipe_data.data.literal[0];
out_data.valid_count = ac_uint<1>(1);
// if we get a length distance pair we will read 'pipe_data.data.length'
// bytes starting at an offset of 'dist'
history_counter = pipe_data.data.length;
reading_history = !pipe_data.data.is_literal && data_valid;
reading_history_next = history_counter > 1;
// initialize the read index
read_history_buffer_idx =
(history_buffer_idx - dist) & history_buffer_idx_mask;
}
if (reading_history) {
// read from the history buffer
out_data[0] = history_buffer[read_history_buffer_idx];
// also check the cache to see if it is there
#pragma unroll
for (int j = 0; j < kCacheDepth + 1; j++) {
if (history_buffer_cache_idx[j] == read_history_buffer_idx) {
out_data[0] = history_buffer_cache_val[j];
}
}
out_data.valid_count = ac_uint<1>(1);
// update the history read index
read_history_buffer_idx =
(read_history_buffer_idx + ac_uint<1>(1)) & history_buffer_idx_mask;
// update whether we are still reading the history
reading_history = reading_history_next;
reading_history_next = history_counter > 2;
history_counter--;
}
if (!done && data_valid) {
// write the newest output byte to the history buffer
history_buffer[history_buffer_idx] = out_data[0];
// also add the most recent written value to the cache
history_buffer_cache_val[kCacheDepth] = out_data[0];
history_buffer_cache_idx[kCacheDepth] = history_buffer_idx;
#pragma unroll
for (int j = 0; j < kCacheDepth; j++) {
history_buffer_cache_val[j] = history_buffer_cache_val[j + 1];
history_buffer_cache_idx[j] = history_buffer_cache_idx[j + 1];
}
// move the write index
history_buffer_idx =
(history_buffer_idx + ac_uint<1>(1)) & history_buffer_idx_mask;
// write the output to the pipe
OutPipe::write(OutPipeBundleT(out_data));
}
}
OutPipe::write(OutPipeBundleT(true));
}
//
// The top level LZ77 decoder that selects between the single- and
// multi-element variants above, at compile time.
//
// Template parameters:
// InPipe: a SYCL pipe that streams in LZ77InputData with a boolean flag that
// indicates whether the input stream is done.
// OutPipe: a SYCL pipe that streams out an array of literals and a
// 'valid_count' that is in the range [0, literals_per_cycle].
// literals_per_cycle: the number of literals to read from the history
// buffer at once. This sets the maximum possible throughput for the
// LZ77 decoder.
// max_distance: the maximum distance for a {length, distance} pair.
// For example, for DEFLATE this is 32K and for snappy this is 64K.
// max_length: the maximum length for a {length, distance} pair.
//
template <typename InPipe, typename OutPipe, size_t literals_per_cycle,
size_t max_distance, size_t max_length>
void LZ77Decoder() {
// check that the input and output pipe types are actually pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// these numbers need to be greater than 0 and powers of 2
static_assert(literals_per_cycle > 0);
static_assert(max_distance > 0);
static_assert(max_length > 0);
static_assert(fpga_tools::IsPow2(literals_per_cycle));
static_assert(fpga_tools::IsPow2(max_distance));
// input type rules:
// must have a member named 'flag' which is a boolean
// must have a member named 'data' which is an instance of LZ77InputData
// the max_distance and max_length of LZ77InputData must match the function's
using InPipeBundleT = decltype(InPipe::read());
static_assert(has_flag_bool_v<InPipeBundleT>);
static_assert(has_data_member_v<InPipeBundleT>);
using InDataT = decltype(std::declval<InPipeBundleT>().data);
static_assert(is_lz77_input_data_v<InDataT>);
static_assert(InDataT::literals_per_cycle <= literals_per_cycle);
static_assert(InDataT::max_distance == max_distance);
static_assert(InDataT::max_length == max_length);
// output type rules:
// must have a member named 'flag' which is a boolean
// must have a member named 'data' which has a subscript operator and a
// member named 'valid_count'
using OutPipeBundleT = decltype(OutPipe::read());
static_assert(has_flag_bool_v<OutPipeBundleT>);
static_assert(has_data_member_v<OutPipeBundleT>);
using OutDataT = decltype(std::declval<OutPipeBundleT>().data);
static_assert(fpga_tools::has_subscript_v<OutDataT>);
static_assert(has_valid_count_member_v<OutDataT>);
// make sure we can construct the OutPipeBundleT from OutDataT and/or a bool
static_assert(std::is_constructible_v<OutPipeBundleT, OutDataT>);
static_assert(std::is_constructible_v<OutPipeBundleT, OutDataT, bool>);
static_assert(std::is_constructible_v<OutPipeBundleT, bool>);
// select which LZ77 decoder version to use based on literals_per_cycle
// at compile time
if constexpr (literals_per_cycle == 1) {
return LZ77DecoderSingleElement<InPipe, OutPipe, max_distance>();
} else {
return LZ77DecoderMultiElement<InPipe, OutPipe, literals_per_cycle,
max_distance>();
}
}
//
// Creates a kernel from the LZ77 decoder function
//
template <typename Id, typename InPipe, typename OutPipe,
size_t literals_per_cycle, size_t max_distance, size_t max_length>
sycl::event SubmitLZ77Decoder(sycl::queue& q) {
return q.single_task<Id>([=] {
return LZ77Decoder<InPipe, OutPipe, literals_per_cycle, max_distance,
max_length>();
});
}
#endif /* __LZ77_DECODER_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/common/common_metaprogramming.hpp | #ifndef __COMMON_METAPROGRAMMING_HPP__
#define __COMMON_METAPROGRAMMING_HPP__
#include <type_traits>
//
// Metaprogramming utility to check if a class has a boolean member named 'flag'
//
namespace detail {
template <typename T, typename = bool>
struct has_flag_bool_impl : std::false_type {};
template <typename T>
struct has_flag_bool_impl<T, decltype(T::flag)> : std::true_type {};
} // namespace detail
template <typename T>
struct has_flag_bool {
static constexpr bool value = detail::has_flag_bool_impl<T>{};
};
template <typename T>
inline constexpr bool has_flag_bool_v = has_flag_bool<T>::value;
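// (e.g., has_flag_bool_v<FlagBundle<ByteSet<4>>> from common.hpp is true,
// since FlagBundle declares 'bool flag'; a type without such a member, or
// whose 'flag' is not a bool, yields false)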
//
// Metaprogramming utility to check if a class has any member named 'data'
//
namespace detail {
template <typename T, typename = int>
struct has_data_member_impl : std::false_type {};
template <typename T>
struct has_data_member_impl<T, decltype((void)T::data, 0)> : std::true_type {};
} // namespace detail
template <typename T>
struct has_data_member {
static constexpr bool value = detail::has_data_member_impl<T>{};
};
template <typename T>
inline constexpr bool has_data_member_v = has_data_member<T>::value;
//
// Metaprogramming utility to check if a class has any member named 'valid_count'
//
namespace detail {
template <typename T, typename = int>
struct has_valid_count_member_impl : std::false_type {};
template <typename T>
struct has_valid_count_member_impl<T, decltype((void)T::valid_count, 0)>
: std::true_type {};
} // namespace detail
template <typename T>
struct has_valid_count_member {
static constexpr bool value = detail::has_valid_count_member_impl<T>{};
};
template <typename T>
inline constexpr bool has_valid_count_member_v =
has_valid_count_member<T>::value;
#endif /* __COMMON_METAPROGRAMMING_HPP__ */
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/common/byte_stacker.hpp | #ifndef __BYTE_STACKER_HPP__
#define __BYTE_STACKER_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "constexpr_math.hpp" // included from ../../../../include
#include "metaprogramming_utils.hpp" // included from ../../../../include
//
// Data streams in through the 'InPipe' pipe and can have between 0 and
// 'literals_per_cycle' valid elements at once. This function takes the input
// and "stacks" it, such that the output is always 'literals_per_cycle' valid
// elements (except possibly the last write).
//
// Template parameters:
// InPipe: a SYCL pipe that streams in an array of bytes and a `valid_count`,
// which is in the range [0, literals_per_cycle]
// OutPipe: a SYCL pipe that streams out an array of 'literals_per_cycle'
// valid bytes on every write, except possibly the last iteration.
// literals_per_cycle: the maximum valid bytes on the input and the number
// of valid bytes on the output (except possibly the last iteration).
//
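// A small worked example, assuming literals_per_cycle = 4: if two inputs
// arrive with valid_count = 3 and then valid_count = 2, the first only fills
// the cache (3 < 4, so nothing is written); the second raises the cache to 5
// elements, so one output with 4 valid bytes is written and 1 byte remains
// cached for a later output.
//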
template <typename InPipe, typename OutPipe, unsigned literals_per_cycle>
void ByteStacker() {
// check that the input and output pipe types are actually pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// literals_per_cycle must be greater than 0
static_assert(literals_per_cycle > 0);
// input type rules:
// must have a member named 'flag' which is a boolean
// must have a member named 'data' which has a subscript operator and a
// member named 'valid_count'
using InPipeBundleT = decltype(InPipe::read());
static_assert(has_flag_bool_v<InPipeBundleT>);
static_assert(has_data_member_v<InPipeBundleT>);
using InDataT = decltype(std::declval<InPipeBundleT>().data);
static_assert(fpga_tools::has_subscript_v<InDataT>);
static_assert(has_valid_count_member_v<InDataT>);
// output type rules:
// same as input data
using OutPipeBundleT = decltype(OutPipe::read());
static_assert(has_flag_bool_v<OutPipeBundleT>);
static_assert(has_data_member_v<OutPipeBundleT>);
using OutDataT = decltype(std::declval<OutPipeBundleT>().data);
static_assert(fpga_tools::has_subscript_v<OutDataT>);
static_assert(has_valid_count_member_v<OutDataT>);
// make sure we can construct the OutPipeBundleT from OutDataT and/or a bool
static_assert(std::is_constructible_v<OutPipeBundleT, OutDataT>);
static_assert(std::is_constructible_v<OutPipeBundleT, OutDataT, bool>);
static_assert(std::is_constructible_v<OutPipeBundleT, bool>);
// the number of bits needed to count from 0 to literals_per_cycle * 2
constexpr int cache_idx_bits = fpga_tools::Log2(literals_per_cycle * 2) + 1;
// cache up to literals_per_cycle * 2 elements so that we can always
// write out literals_per_cycle valid elements in a row (except on the last
// iteration)
ac_uint<cache_idx_bits> cache_idx = 0;
[[intel::fpga_register]] unsigned char cache_buf[literals_per_cycle * 2];
bool done = false;
while (!done) {
// try to read in some data
bool data_valid;
auto pipe_data = InPipe::read(data_valid);
done = pipe_data.flag && data_valid;
// add the valid data we read in to the cache
if (data_valid && !done) {
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
if (i < pipe_data.data.valid_count) {
cache_buf[cache_idx + i] = pipe_data.data[i];
}
}
cache_idx += pipe_data.data.valid_count;
}
// if there are enough elements in the cache to write out
// 'literals_per_cycle' valid elements, or if the upstream kernel indicated
// that it is done producing data, then write to the output pipe
if (cache_idx >= literals_per_cycle || done) {
// create the output from the current cache
OutDataT out_data;
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
// copy the character
out_data[i] = cache_buf[i];
// shift the extra characters to the front of the cache
cache_buf[i] = cache_buf[i + literals_per_cycle];
}
// mark output with the number of valid elements
if (cache_idx <= literals_per_cycle) {
out_data.valid_count = cache_idx;
} else {
out_data.valid_count = literals_per_cycle;
}
// decrement cache_idx by number of elements we read
// it is safe to always subtract literals_per_cycle since that can only
// result in a negative number on the last iteration of the outer while
// loop (when 'done' is true), at which point the value will never be used
cache_idx -= ac_uint<cache_idx_bits>(literals_per_cycle);
// write output
OutPipe::write(OutPipeBundleT(out_data));
}
}
// notify downstream kernel that we are done
OutPipe::write(OutPipeBundleT(true));
}
// Creates a kernel from the byte stacker kernel
template <typename Id, typename InPipe, typename OutPipe,
unsigned literals_per_cycle>
sycl::event SubmitByteStacker(sycl::queue& q) {
return q.single_task<Id>([=] {
ByteStacker<InPipe, OutPipe, literals_per_cycle>();
});
}
#endif /* __BYTE_STACKER_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/common/common.hpp | #ifndef __COMMON_HPP__
#define __COMMON_HPP__
#include <sycl/sycl.hpp>
#include <fstream>
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "common_metaprogramming.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "memory_utils.hpp" // included from ../../../../include
// we only use unsigned ac_ints in this design, so this alias lets us not write
// the 'false' template argument everywhere
template <int bits>
using ac_uint = ac_int<bits, false>;
//
// Extend a type 'T' with a boolean flag
//
template <typename T>
struct FlagBundle {
using value_type = T;
// ensure the type carried in this class has a subscript operator and that
// it has a static integer member named 'size'
static_assert(fpga_tools::has_subscript_v<T>);
// this is used by the functions in memory_utils.hpp to ensure the size of
// the type in the SYCL pipe matches the memory width
static constexpr size_t size = T::size;
FlagBundle() : data(T()), flag(false) {}
FlagBundle(T d_in) : data(d_in), flag(false) {}
FlagBundle(T d_in, bool f_in) : data(d_in), flag(f_in) {}
FlagBundle(bool f_in) : data(T()), flag(f_in) {}
unsigned char& operator[](int i) { return data[i]; }
const unsigned char& operator[](int i) const { return data[i]; }
T data;
bool flag;
};
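// (usage note: the kernels in this design write FlagBundle(data) for normal
// beats and a FlagBundle(true) as the final 'done' token on a pipe)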
//
// The generic data that goes into the LZ77 decoder from the Huffman decoder
//
// Template parameters:
// literals_per_cycle_: the number of literals coming into the LZ77 decoder
// at once. This is NOT the literals_per_cycle that sets how many literals
// the LZ77 decoder reads from the history buffer at once.
// max_distance_: The maximum distance value in a {length, distance} pair.
// This sets how many bits are required for the variable used to index into
// the LZ77 decoder's history buffer.
// max_length_: the maximum length value in a {length, distance} pair.
//
template <size_t literals_per_cycle_, size_t max_distance_, size_t max_length_>
struct LZ77InputData {
static constexpr auto literals_per_cycle = literals_per_cycle_;
static constexpr auto max_distance = max_distance_;
static constexpr auto max_length = max_length_;
static_assert(literals_per_cycle_ > 0);
static_assert(max_length > 0);
static_assert(max_distance > 0);
static constexpr size_t size = literals_per_cycle_;
static constexpr size_t max_literals = literals_per_cycle_;
static constexpr size_t valid_count_bits = fpga_tools::Log2(max_literals) + 1;
static constexpr size_t length_bits = fpga_tools::Log2(max_length) + 1;
static constexpr size_t distance_bits = fpga_tools::Log2(max_distance) + 1;
static_assert(valid_count_bits > 0);
static_assert(literals_per_cycle < fpga_tools::Pow2(valid_count_bits));
static_assert(length_bits > 0);
static_assert(max_length < fpga_tools::Pow2(length_bits));
static_assert(distance_bits > 0);
static_assert(max_distance < fpga_tools::Pow2(distance_bits));
LZ77InputData() {}
// indicates whether this is a literal or {length, distance} pair
bool is_literal;
// either the literals, or the length from the {length, distance} pair
union {
ac_uint<length_bits> length;
unsigned char literal[literals_per_cycle_];
};
// either the number of valid literals, or the distance in the
// {length, distance} pair
union {
ac_uint<distance_bits> distance;
ac_uint<valid_count_bits> valid_count;
};
unsigned char& operator[](int i) { return literal[i]; }
const unsigned char& operator[](int i) const { return literal[i]; }
};
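// (usage sketch: a literal beat sets is_literal = true and fills 'literal'
// and 'valid_count'; a copy beat sets is_literal = false and fills 'length'
// and 'distance'; the unions let the two layouts share storage)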
//
// Metaprogramming utils to check if a type is any instance of LZ77InputData
//
namespace detail {
template <typename T>
struct is_lz77_input_data_impl : std::false_type {};
template <unsigned a, unsigned b, unsigned c>
struct is_lz77_input_data_impl<LZ77InputData<a, b, c>> : std::true_type {};
} // namespace detail
template <class T>
struct is_lz77_input_data {
static constexpr bool value = detail::is_lz77_input_data_impl<T>{};
};
template <class T>
inline constexpr bool is_lz77_input_data_v = is_lz77_input_data<T>::value;
// The LZ77 datastructure specific for the GZIP decompressor
constexpr size_t kGzipMaxLZ77Length = 32768;
constexpr size_t kGzipMaxLZ77Distance = 32768;
using GzipLZ77InputData =
LZ77InputData<1, kGzipMaxLZ77Distance, kGzipMaxLZ77Length>;
// The LZ77 datastructure specific for the Snappy decompressor
// Snappy V1.1 format sets the maximum history to 65K.
// At the time of writing, the maximum history distance seen in practice is
// 32K, but the specification claims support for 65K, so we will be safe.
constexpr size_t kSnappyMaxLZ77Length = 64;
constexpr size_t kSnappyMaxLZ77Distance = 1 << 16;
template <size_t n>
using SnappyLZ77InputData =
LZ77InputData<n, kSnappyMaxLZ77Distance, kSnappyMaxLZ77Length>;
//
// Holds an array of bytes, where valid_count indicates how many of the 'n'
// bytes are valid. The valid bytes must be sequential and start at index 0.
// E.g., if valid_count = 2, then byte[0] and byte[1] are valid, while byte[2],
// byte[3], ..., byte[n-1] are not.
//
template <size_t num_bytes>
struct BytePack {
static constexpr unsigned count_bits = fpga_tools::Log2(num_bytes) + 1;
static_assert(count_bits > 0);
static_assert(num_bytes < fpga_tools::Pow2(count_bits));
static constexpr size_t size = num_bytes;
unsigned char byte[num_bytes];
ac_uint<count_bits> valid_count;
unsigned char& operator[](int i) { return byte[i]; }
const unsigned char& operator[](int i) const { return byte[i]; }
};
//
// Similar to a BytePack, but all of the bytes are valid.
//
template <size_t num_bytes>
struct ByteSet {
static constexpr size_t size = num_bytes;
unsigned char byte[num_bytes];
unsigned char& operator[](int i) { return byte[i]; }
const unsigned char& operator[](int i) const { return byte[i]; }
};
//
// returns the number of trailing zeros in an ac_int
// E.g. 0b011101000 has 3 trailing zeros
//
template <int bits, bool is_signed>
auto CTZ(const ac_int<bits, is_signed>& in) {
static_assert(bits > 0);
constexpr int out_bits = fpga_tools::Log2(bits) + 1;
ac_uint<out_bits> ret(bits);
#pragma unroll
for (int i = bits - 1; i >= 0; i--) {
if (in[i]) {
ret = i;
}
}
return ret;
}
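// (e.g., CTZ(ac_uint<9>(0b011101000)) returns 3; an all-zero input returns
// 'bits', since 'ret' keeps its initial value)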
//
// Reads 'filename' and returns an array of chars (the bytes of the file)
//
std::vector<unsigned char> ReadInputFile(const std::string& filename) {
// open file stream
std::ifstream fin(filename, std::ios::binary);
// make sure it opened
if (!fin.good() || !fin.is_open()) {
std::cerr << "ERROR: could not open " << filename << " for reading\n";
std::terminate();
}
// read in bytes
std::vector<unsigned char> result;
char tmp;
while (fin.get(tmp)) {
result.push_back(tmp);
}
fin.close();
return result;
}
//
// Writes the chars (bytes) from 'data' to 'filename'
//
void WriteOutputFile(const std::string& filename,
std::vector<unsigned char>& data) {
// open file stream
std::ofstream fout(filename.c_str());
// make sure it opened
if (!fout.good() || !fout.is_open()) {
std::cerr << "ERROR: could not open " << filename << " for writing\n";
std::terminate();
}
// write out bytes
for (auto& c : data) {
fout << c;
}
fout.close();
}
//
// A base class for a decompressor.
// This class is purely virtual, i.e. another class must inherit from it and
// override the 'DecompressBytes' function. This is done in
// ../gzip/gzip_decompressor.hpp and ../snappy/snappy_decompressor.hpp for the
// GZIP and SNAPPY decompressors, respectively.
//
class DecompressorBase {
public:
//
// A virtual function that must be overriden by a deriving class.
// The overriding function performs the actual decompression using the FPGA.
// See ../gzip/gzip_decompressor.hpp and ../snappy/snappy_decompressor.hpp
// for the GZIP and SNAPPY versions of these, respectively
//
virtual std::optional<std::vector<unsigned char>> DecompressBytes(
sycl::queue&, std::vector<unsigned char>&, int, bool) = 0;
//
// Reads the bytes in 'in_filename', decompresses them, and writes the
// output to 'out_filename' (if write_output == true). This function uses
// the DecompressBytes virtual function above to do the actual decompression.
//
// Arguments:
// q: the SYCL queue
// in_filename: the file path to the compressed input file
// out_filename: the file path where to write the output file
// runs: the number of times to decompress the same file. This is for
// throughput testing purposes.
// print_stats: whether to print the execution time and throughput
// statistics to stdout
// write_output: whether to write the decompressed output to 'out_filename'
//
bool DecompressFile(sycl::queue& q, const std::string& in_filename,
const std::string& out_filename, int runs,
bool print_stats, bool write_output) {
std::cout << "Decompressing '" << in_filename << "' " << runs
<< ((runs == 1) ? " time" : " times") << std::endl;
auto in_bytes = ReadInputFile(in_filename);
auto result = DecompressBytes(q, in_bytes, runs, print_stats);
if (result != std::nullopt) {
if (write_output) {
std::cout << std::endl;
std::cout << "Writing output data to '" << out_filename << "'"
<< std::endl;
std::cout << std::endl;
WriteOutputFile(out_filename, result.value());
}
return true;
} else {
return false;
}
}
};
//
// The Producer kernel reads 'literals_per_cycle' elements at a time from
// memory (in_ptr) and writes them into InPipe. We use the utilities from
// DirectProgramming/C++SYCL_FPGA/include/memory_utils.hpp to do this.
//
// Template parameters:
// Id: the type to use for the kernel ID
// InPipe: a SYCL pipe that streams bytes into the decompression engine,
// 'literals_per_cycle' at a time
// literals_per_cycle: the number of bytes to read from the pointer and
// write to the pipe at once.
//
// Arguments:
// q: the SYCL queue
// in_count_padded: the total number of bytes to read from in_ptr and write
// to the input pipe. In this design, we pad the size to be a multiple of
// literals_per_cycle.
// in_ptr: a pointer to the input data
//
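// (a hypothetical caller-side padding computation that satisfies the assert
// in the function below:
//   unsigned in_count_padded =
//       ((in_count + literals_per_cycle - 1) / literals_per_cycle) *
//       literals_per_cycle;
// where 'in_count' is the unpadded byte count)
//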
template <typename Id, typename InPipe, unsigned literals_per_cycle>
sycl::event SubmitProducer(sycl::queue& q, unsigned in_count_padded,
unsigned char* in_ptr) {
assert(in_count_padded % literals_per_cycle == 0);
auto iteration_count = in_count_padded / literals_per_cycle;
return q.single_task<Id>([=] {
// Use the MemoryToPipe utility to read from in_ptr 'literals_per_cycle'
// elements at once and write them to 'InPipe'.
// The 'false' template argument is our way of guaranteeing to the library
    // that 'in_count_padded' is a multiple of 'literals_per_cycle'. In both the
// GZIP and SNAPPY designs, we guarantee this in the DecompressBytes
// functions in ../gzip/gzip_decompressor.hpp and
// ../snappy/snappy_decompressor.hpp respectively.
#if defined (IS_BSP)
// When targeting a BSP, we instruct the compiler that this pointer
// lives on the device.
// Knowing this, the compiler won't generate hardware to
// potentially get data from the host.
sycl::device_ptr<unsigned char> in(in_ptr);
#else
// Device pointers are not supported when targeting an FPGA
// family/part
unsigned char* in(in_ptr);
#endif
fpga_tools::MemoryToPipe<InPipe, literals_per_cycle, false>(
in, iteration_count);
});
}
//
// Same idea as SubmitProducer but in the opposite direction. Data is streamed
// from the SYCL pipe (OutPipe) and written to memory (out_ptr).
//
// Template parameters:
// Id: the type to use for the kernel ID
//    OutPipe: a SYCL pipe that streams bytes out of the decompression engine,
//      'literals_per_cycle' at a time
//    literals_per_cycle: the number of bytes to read from the pipe and write
//      to the pointer at once.
//
// Arguments:
// q: the SYCL queue
//    out_count_padded: the total number of bytes to read from the pipe and
// write to out_ptr. In this design, we pad the size to be a multiple of
// literals_per_cycle.
// out_ptr: a pointer to the output data
//
template <typename Id, typename OutPipe, unsigned literals_per_cycle>
sycl::event SubmitConsumer(sycl::queue& q, unsigned out_count_padded,
unsigned char* out_ptr) {
assert(out_count_padded % literals_per_cycle == 0);
auto iteration_count = out_count_padded / literals_per_cycle;
return q.single_task<Id>([=] {
// Use the PipeToMemory utility to read 'literals_per_cycle'
// elements at once from 'OutPipe' and write them to 'out_ptr'.
// For details about the 'false' template parameter, see the SubmitProducer
// function above.
#if defined (IS_BSP)
// When targeting a BSP, we instruct the compiler that this pointer
// lives on the device.
// Knowing this, the compiler won't generate hardware to
// potentially get data from the host.
sycl::device_ptr<unsigned char> out(out_ptr);
#else
// Device pointers are not supported when targeting an FPGA
// family/part
unsigned char* out(out_ptr);
#endif
fpga_tools::PipeToMemory<OutPipe, literals_per_cycle, false>(
out, iteration_count);
// read the last 'done' signal
bool done = false;
while (!done) {
bool valid;
auto d = OutPipe::read(valid);
done = d.flag && valid;
}
});
}
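//
// Usage sketch (illustrative): wiring a producer and consumer around a
// decompression engine. The kernel IDs, pipe names, and counts below are
// assumptions for illustration; the real designs do this in
// ../gzip/gzip_decompressor.hpp and ../snappy/snappy_decompressor.hpp:
//
//   class MyProducerId;
//   class MyConsumerId;
//   using MyInPipe = sycl::ext::intel::pipe<class MyInPipeId, ByteSet<1>>;
//   using MyOutPipe =
//       sycl::ext::intel::pipe<class MyOutPipeId, FlagBundle<BytePack<1>>>;
//   // ... submit an engine that reads MyInPipe and writes MyOutPipe ...
//   auto p_e = SubmitProducer<MyProducerId, MyInPipe, 1>(q, in_count, in);
//   auto c_e = SubmitConsumer<MyConsumerId, MyOutPipe, 1>(q, out_count, out);
//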
#endif /* __COMMON_HPP__ */
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/gzip/gzip_metadata_reader.hpp | #ifndef __GZIP_METADATA_READER_HPP__
#define __GZIP_METADATA_READER_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../common/common.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "gzip_header_data.hpp"
#include "metaprogramming_utils.hpp" // included from ../../../../include
//
// A kernel that streams in bytes of the GZIP file, strips away (and parses) the
// GZIP header and footer, and streams out the uncompressed data.
// The output of this kernel is a stream of DEFLATE formatted blocks.
//
// Template parameters:
// InPipe: a SYCL pipe that streams in compressed GZIP data, 1 byte at a time
// OutPipe: a SYCL pipe that streams out the compressed GZIP data, 1 byte at
// a time excluding the GZIP header and footer data
//
// Arguments:
// in_count: the number of compressed bytes
// hdr_data: the parsed GZIP header
// crc: the parsed CRC from the GZIP footer
// out_count: the parsed uncompressed size from the GZIP footer
//
template <typename InPipe, typename OutPipe>
void GzipMetadataReader(int in_count, GzipHeaderData& hdr_data, int& crc,
int& out_count) {
// ensure the InPipe and OutPipe are SYCL pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// the input and output pipe data types
using InPipeBundleT = decltype(InPipe::read());
using OutPipeBundleT = decltype(OutPipe::read());
// make sure the input and output types are correct
static_assert(std::is_same_v<InPipeBundleT, ByteSet<1>>);
static_assert(std::is_same_v<OutPipeBundleT, FlagBundle<ByteSet<1>>>);
/*
GZIP FILE FORMAT:
===== HEADER =====
2 bytes: magic number (0x1f8b)
1 byte: compression method
1 byte: 'flags'
4 bytes: time
1 byte: extra flags
1 byte: OS
Read more bytes based on flags:
if flags & 0x01 != 0: Flag = Text
if flags & 0x04 != 0: Flag = Errata, read 2 bytes for 'length',
read 'length' more bytes
if flags & 0x08 != 0: Filename, read nullterminated string
if flags & 0x02 != 0: CRC-16, read 2 bytes
if flags & 0x10 != 0: Comment, read nullterminated string
===== DATA =====
1 or more consecutive DEFLATE compressed blocks
===== FOOTER =====
4 bytes: CRC-32 Checksum
4 bytes: Uncompressed data size in bytes
*/
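  /*
    Worked example (illustrative): a header with no optional fields is the
    minimal 10 bytes, e.g.
      1f 8b 08 00 00 00 00 00 00 03
    i.e. magic 0x1f8b, compression method 8 (DEFLATE), flags 0x00, time 0,
    extra flags 0, OS 3 (Unix); the DEFLATE data begins immediately after.
  */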
// This kernel reads the entire file (HEADER, DATA, and FOOTER) from the input
  // SYCL pipe 'InPipe', strips away and parses the HEADER and FOOTER, and
// forwards the DATA to the next kernel through the SYCL pipe 'OutPipe'
int i = 0;
bool i_in_range = 0 < in_count;
bool i_next_in_range = 1 < in_count;
short state_counter = 0;
short errata_len = 0;
unsigned char curr_byte;
GzipHeaderState state = GzipHeaderState::MagicNumber;
unsigned char header_magic[2];
unsigned char header_compression_method;
unsigned char header_flags;
unsigned char header_time[4];
unsigned char header_os;
unsigned char header_filename[256];
unsigned char header_crc[2];
header_filename[0] = '\0';
// NOTE: this loop is not the main processing loop and therefore is
// not critical (low trip count). However, the compiler doesn't know that
  // and tries to optimize for throughput (~Fmax/II). Since we don't want
  // this loop to be our Fmax bottleneck, we increase the II.
[[intel::initiation_interval(4)]] // NO-FORMAT: Attribute
while (state != GzipHeaderState::SteadyState) {
auto pipe_data = InPipe::read();
curr_byte = pipe_data[0];
// FSM for parsing the GZIP header, 1 byte at a time.
switch (state) {
case GzipHeaderState::MagicNumber: {
header_magic[state_counter] = curr_byte;
state_counter++;
if (state_counter == 2) {
state = GzipHeaderState::CompressionMethod;
state_counter = 0;
}
break;
}
case GzipHeaderState::CompressionMethod: {
header_compression_method = curr_byte;
state = GzipHeaderState::Flags;
break;
}
case GzipHeaderState::Flags: {
header_flags = curr_byte;
state = GzipHeaderState::Time;
break;
}
case GzipHeaderState::Time: {
header_time[state_counter] = curr_byte;
state_counter++;
if (state_counter == 4) {
state = GzipHeaderState::ExtraFlags;
state_counter = 0;
}
break;
}
case GzipHeaderState::ExtraFlags: {
state = GzipHeaderState::OS;
break;
}
case GzipHeaderState::OS: {
header_os = curr_byte;
if (header_flags & 0x04) {
state = GzipHeaderState::Errata;
} else if (header_flags & 0x08) {
state = GzipHeaderState::Filename;
} else if (header_flags & 0x02) {
state = GzipHeaderState::CRC;
} else if (header_flags & 0x10) {
state = GzipHeaderState::Comment;
} else {
state = GzipHeaderState::SteadyState;
}
break;
}
case GzipHeaderState::Errata: {
if (state_counter == 0) {
errata_len |= curr_byte;
state_counter++;
} else if (state_counter == 1) {
errata_len |= (curr_byte << 8);
state_counter++;
} else {
if ((state_counter - 2) == errata_len) {
if (header_flags & 0x08) {
state = GzipHeaderState::Filename;
} else if (header_flags & 0x02) {
state = GzipHeaderState::CRC;
} else if (header_flags & 0x10) {
state = GzipHeaderState::Comment;
} else {
state = GzipHeaderState::SteadyState;
}
state_counter = 0;
} else {
state_counter++;
}
}
break;
}
case GzipHeaderState::Filename: {
header_filename[state_counter] = curr_byte;
if (curr_byte == '\0') {
if (header_flags & 0x02) {
state = GzipHeaderState::CRC;
} else if (header_flags & 0x10) {
state = GzipHeaderState::Comment;
} else {
state = GzipHeaderState::SteadyState;
}
state_counter = 0;
} else {
state_counter++;
}
break;
}
case GzipHeaderState::CRC: {
if (state_counter == 0) {
header_crc[0] = curr_byte;
state_counter++;
} else if (state_counter == 1) {
header_crc[1] = curr_byte;
state_counter++;
} else {
if (header_flags & 0x10) {
state = GzipHeaderState::Comment;
} else {
state = GzipHeaderState::SteadyState;
}
state_counter = 0;
}
break;
}
case GzipHeaderState::Comment: {
if (curr_byte == '\0') {
state = GzipHeaderState::SteadyState;
state_counter = 0;
} else {
state_counter++;
}
break;
}
default: {
break;
}
}
i_in_range = i_next_in_range;
i_next_in_range = i < (in_count - 2);
i++;
}
// the last 8 bytes of the stream are the CRC and size (in bytes) of the file
// this data will be sent back to the host to help validate the output
unsigned char crc_bytes[4];
unsigned char size_bytes[4];
// finished reading the header, so now stream the bytes into the decompressor.
// keep track of the last 8 bytes, which are the crc and output size.
// NOTE: we DO care about the performance of this loop, because it will feed
// the rest of the decompressor.
while (i_in_range) {
bool valid_pipe_read;
auto pipe_data = InPipe::read(valid_pipe_read);
curr_byte = pipe_data[0];
if (valid_pipe_read) {
// keep track of the last 8 bytes
int remaining_bytes = (in_count - i - 1);
if (remaining_bytes < 8) {
if (remaining_bytes < 4) {
size_bytes[3 - remaining_bytes] = curr_byte;
} else {
crc_bytes[7 - remaining_bytes] = curr_byte;
}
}
OutPipe::write(OutPipeBundleT(pipe_data, (i == (in_count - 1))));
i_in_range = i_next_in_range;
i_next_in_range = i < (in_count - 2);
i++;
}
}
// parsing the GZIP footer
// construct the 32-bit CRC and size (out_count) from the last 8 bytes read
crc = 0;
out_count = 0;
for (int i = 0; i < 4; i++) {
crc |= (unsigned int)(crc_bytes[i]) << (i * 8);
out_count |= (unsigned int)(size_bytes[i]) << (i * 8);
}
// construct the header data
hdr_data.magic[0] = header_magic[0];
hdr_data.magic[1] = header_magic[1];
hdr_data.compression_method = header_compression_method;
hdr_data.flags = header_flags;
for (int i = 0; i < 4; i++) hdr_data.time[i] = header_time[i];
hdr_data.os = header_os;
for (int i = 0; i < 256; i++) hdr_data.filename[i] = header_filename[i];
hdr_data.crc[0] = header_crc[0];
hdr_data.crc[1] = header_crc[1];
}
//
// Creates a kernel from the GZIP metadata reader function
//
template <typename Id, typename InPipe, typename OutPipe>
sycl::event SubmitGzipMetadataReader(sycl::queue& q, int in_count,
GzipHeaderData* hdr_data_ptr, int* crc_ptr,
int* out_count_ptr) {
return q.single_task<Id>([=]() [[intel::kernel_args_restrict]] {
#if defined (IS_BSP)
// When targeting a BSP, we instruct the compiler that this pointer
// lives on the device.
// Knowing this, the compiler won't generate hardware to
// potentially get data from the host.
sycl::device_ptr<GzipHeaderData> hdr_data(hdr_data_ptr);
sycl::device_ptr<int> crc(crc_ptr);
sycl::device_ptr<int> out_count(out_count_ptr);
#else
// Device pointers are not supported when targeting an FPGA
// family/part
GzipHeaderData* hdr_data(hdr_data_ptr);
int* crc(crc_ptr);
int* out_count(out_count_ptr);
#endif
// local copies of the output data
GzipHeaderData hdr_data_loc;
int crc_loc;
int out_count_loc;
GzipMetadataReader<InPipe, OutPipe>(in_count, hdr_data_loc, crc_loc,
out_count_loc);
// write back the local copies of the output data
*hdr_data = hdr_data_loc;
*crc = crc_loc;
*out_count = out_count_loc;
});
}
#endif /* __GZIP_METADATA_READER_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/gzip/byte_bit_stream.hpp | #ifndef __BYTE_BIT_STREAM_HPP__
#define __BYTE_BIT_STREAM_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "constexpr_math.hpp" // included from ../../../../include
//
// A stream of bits that is filled with a byte at a time
//
// Template parameters:
// bits: the number of bits to store in the stream. We need to store at least
// 1 byte, so this should be at least 8.
// max_dynamic_read_bits: the maximum number of bits read from the stream
// 'dynamically' via the ReadUInt(ReadCountT) function.
// max_shift_bits: the maximum number of bits consumed by a single call
// to the Shift(ShiftCountT) function.
//
template <int bits, int max_dynamic_read_bits, int max_shift_bits>
class ByteBitStream {
// static asserts to make sure the template parameters make sense
static_assert(bits >= 8);
static_assert(max_dynamic_read_bits > 0);
static_assert(max_dynamic_read_bits <= bits);
static_assert(max_shift_bits > 0);
static_assert(max_shift_bits <= bits);
// an ac_int to count from 0 to 'bits', inclusive
static constexpr int count_bits = fpga_tools::Log2(bits) + 1;
using CountT = ac_int<count_bits, false>;
// an ac_int to count from 0 to 'max_dynamic_read_bits', inclusive
static constexpr int dynamic_read_count_bits =
fpga_tools::Log2(max_dynamic_read_bits) + 1;
using ReadCountT = ac_int<dynamic_read_count_bits, false>;
// an ac_int to count from 0 to 'max_shift_bits' inclusive
static constexpr int shift_count_bits = fpga_tools::Log2(max_shift_bits) + 1;
using ShiftCountT = ac_int<shift_count_bits, false>;
public:
ByteBitStream() : buf_(0), size_(0), space_(bits) {}
//
// read 'read_bits' bits from the bitstream and interpret them as an
// unsigned int, where 'read_bits' is a runtime variable
//
auto ReadUInt(ReadCountT read_bits) {
ac_int<max_dynamic_read_bits, false> mask = (1 << read_bits) - 1;
return buf_.template slc<max_dynamic_read_bits>(0) & mask;
}
//
// read 'read_bits' bits from the bitstream and interpret them as an
// unsigned int, where 'read_bits' is constexpr
//
template <int read_bits>
auto ReadUInt() {
static_assert(read_bits <= bits);
return buf_.template slc<read_bits>(0);
}
//
// shift the bitstream by 'shift_bits' bits
//
void Shift(ShiftCountT shift_bits) {
buf_ >>= shift_bits;
size_ -= shift_bits;
space_ += shift_bits;
}
//
// shift by some number of bits to realign to a byte boundary
//
void AlignToByteBoundary() {
auto bits_remaining_in_byte = size_.template slc<3>(0);
if (bits_remaining_in_byte != 0) {
Shift(bits_remaining_in_byte);
}
}
auto Size() { return size_; }
auto Space() { return space_; }
bool Empty() { return size_ == 0; }
bool HasSpaceForByte() { return space_ >= 8; }
//
// push in a new byte (8 bits) into the stream
// undefined behaviour if space_ < 8
//
void NewByte(unsigned char b) {
ac_int<8, false> b_ac_int(b);
buf_.template set_slc(size_, b_ac_int);
size_ += decltype(size_)(8);
space_ -= decltype(space_)(8);
}
private:
ac_int<bits, false> buf_;
CountT size_, space_;
};
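//
// Usage sketch (illustrative): assuming a 16-bit stream, pushing one byte
// and consuming its low nibble looks like this (bits are read LSB-first):
//
//   ByteBitStream<16, 4, 4> bs;
//   bs.NewByte(0xA5);             // buffer = 0b10100101, Size() == 8
//   auto low = bs.ReadUInt<4>();  // low == 0b0101
//   bs.Shift(4);                  // Size() == 4; next read sees 0b1010
//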
#endif // __BYTE_BIT_STREAM_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/gzip/gzip_header_data.hpp | #ifndef __GZIP_HEADER_DATA_HPP__
#define __GZIP_HEADER_DATA_HPP__
#include <iomanip>
#include <iostream>
#include <string>
//
// States for parsing the GZIP header
//
enum class GzipHeaderState {
MagicNumber,
CompressionMethod,
Flags,
Time,
ExtraFlags,
OS,
Errata,
Filename,
CRC,
Comment,
SteadyState
};
//
// Stores the GZIP header data
//
struct GzipHeaderData {
GzipHeaderData() {
magic[0] = 0;
magic[1] = 0;
compression_method = 0;
flags = 0;
time[0] = 0;
time[1] = 0;
time[2] = 0;
time[3] = 0;
os = 0;
filename[0] = '\0';
crc[0] = 0;
crc[1] = 0;
}
unsigned short MagicNumber() const {
return ((unsigned short)(magic[0]) << 8) | (unsigned short)(magic[1]);
}
unsigned short CRC() const {
return ((unsigned short)(crc[0]) << 8) | (unsigned short)(crc[1]);
}
unsigned int Time() const {
unsigned int time_u = 0;
for (int i = 0; i < 4; i++) {
time_u |= ((unsigned int)(time[i]) << (8 * i));
}
return time_u;
}
std::string Filename() const {
std::string ret;
int i = 0;
while (i < 256 && filename[i] != '\0') {
ret.push_back(filename[i]);
i++;
}
return ret;
}
std::string OS() const {
switch (os) {
case 0:
return "FAT";
case 1:
return "Amiga";
case 2:
return "VMS";
case 3:
return "Unix";
case 4:
return "VM/CMS";
case 5:
return "Atari TOS";
case 6:
return "HPFS";
case 7:
return "Macintosh";
case 8:
return "Z-System";
case 9:
return "CP/M";
case 10:
return "TOPS-20";
case 11:
return "NTFS";
case 12:
return "Acorn RISCOS";
case 13:
return "FAT";
default:
return "Unknown";
}
}
unsigned char magic[2];
unsigned char compression_method;
unsigned char flags;
unsigned char time[4];
unsigned char os;
unsigned char filename[256];
unsigned char crc[2];
};
// 'inline' avoids multiple-definition errors when this header is included in
// more than one translation unit
inline std::ostream& operator<<(std::ostream& os,
                                const GzipHeaderData& hdr_data) {
std::ios_base::fmtflags save_flags;
os << "GZIP Header Data\n";
// magic number
save_flags = os.flags();
  os << "Magic Number: 0x" << std::hex << std::setw(4) << std::setfill('0')
     << hdr_data.MagicNumber() << "\n";
os.flags(save_flags);
// compression method
os << "Compression method: "
<< ((hdr_data.compression_method == 8) ? "Supported" : "Not Supported")
<< "\n";
// flags
  os << "Flags: 0x" << std::hex << std::setw(4) << std::setfill('0')
     << (unsigned short)(hdr_data.flags) << "\n";
os.flags(save_flags);
// time
os << "Time: " << hdr_data.Time() << "\n";
// OS
os << "OS: " << hdr_data.OS() << "\n";
// filename
os << "Filename: " << hdr_data.Filename() << "\n";
// CRC
  os << "CRC: 0x" << std::hex << std::setw(4) << std::setfill('0')
     << hdr_data.CRC() << "\n";
  // restore the original stream flags (once is enough)
  os.flags(save_flags);
return os;
}
#endif // __GZIP_HEADER_DATA_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/gzip/huffman_decoder.hpp | #ifndef __HUFFMAN_DECODER_HPP__
#define __HUFFMAN_DECODER_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../common/common.hpp"
#include "byte_bit_stream.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "metaprogramming_utils.hpp" // included from ../../../../include
// The size of the ByteBitStream buffer can be set from the compile command;
// this number was set experimentally.
#ifndef BIT_BUFFER_BITS
#define BIT_BUFFER_BITS 48
#endif
constexpr int kBitBufferBits = BIT_BUFFER_BITS;
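// For example (assuming the build flow forwards user-supplied flags to the
// device compiler), the buffer size could be overridden at compile time with
// something like: -DBIT_BUFFER_BITS=64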
// The maximum bits to read dynamically from the bitstream.
// More bits can be read statically using the templated ReadUInt function.
// In the main processing loop, we are reading 30 bits every iteration,
// but for reading in the first and second table, we read a dynamic number of
// bits, with a maximum of 5
constexpr int kBitBufferMaxReadBits = 5;
// The maximum bits consumed in one Shift of the bitstream.
// The maximum number of bits consumed in one iteration of the main loop is 30
constexpr int kBitBufferMaxShiftBits = 30;
// sanity check the kBitBufferBits
static_assert(kBitBufferBits > 8);
static_assert(kBitBufferBits > kBitBufferMaxReadBits);
static_assert(kBitBufferBits > kBitBufferMaxShiftBits);
// the ByteBitStream alias
using BitStreamT = ByteBitStream<kBitBufferBits, kBitBufferMaxReadBits,
kBitBufferMaxShiftBits>;
// constants for the static Huffman table
constexpr int kDeflateStaticNumLitLenCodes = 288;
constexpr int kDeflateStaticNumDistCodes = 32;
constexpr int kDeflateStaticTotalCodes =
kDeflateStaticNumLitLenCodes + kDeflateStaticNumDistCodes;
// forward declare the helper functions that are defined at the end of the file
namespace huffman_decoder_detail {
template <typename InPipe>
std::pair<bool, ac_uint<2>> ParseLastBlockAndBlockType(BitStreamT& bit_stream);
template <typename InPipe>
void ParseFirstTable(BitStreamT& bit_stream, ac_uint<9>& numlitlencodes,
ac_uint<6>& numdistcodes, ac_uint<5>& numcodelencodes,
ac_uint<8> codelencode_map_first_code[8],
ac_uint<8> codelencode_map_last_code[8],
ac_uint<5> codelencode_map_base_idx[8],
ac_uint<5> codelencode_map[19]);
template <typename InPipe>
void ParseSecondTable(
BitStreamT& bit_stream, bool is_static_huffman_block,
ac_uint<9> numlitlencodes, ac_uint<6> numdistcodes,
ac_uint<5> numcodelencodes, ac_uint<8> codelencode_map_first_code[8],
ac_uint<8> codelencode_map_last_code[8],
ac_uint<5> codelencode_map_base_idx[8], ac_uint<5> codelencode_map[19],
ac_uint<15> lit_map_first_code[15], ac_uint<15> lit_map_last_code[15],
ac_uint<9> lit_map_base_idx[15], ac_uint<9> lit_map[286],
ac_uint<15> dist_map_first_code[15], ac_uint<15> dist_map_last_code[15],
ac_uint<5> dist_map_base_idx[15], ac_uint<5> dist_map[32]);
} // namespace huffman_decoder_detail
//
// Performs Huffman Decoding.
// Streams in multiple DEFLATE blocks and performs huffman decoding for
// uncompressed, statically compressed, and dynamically compressed blocks.
// For dynamically compressed blocks, the huffman tables are also built from the
// input byte stream.
//
// Template parameters:
// InPipe: a SYCL pipe that streams in compressed data, 1 byte at a time
// OutPipe: a SYCL pipe that streams out either literals or
// {length, distance} pairs.
//
template <typename InPipe, typename OutPipe>
void HuffmanDecoder() {
// ensure the InPipe and OutPipe are SYCL pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
  // the input and output pipe data types
using InPipeBundleT = decltype(InPipe::read());
using OutPipeBundleT = decltype(OutPipe::read());
// make sure the input and output types are correct
static_assert(std::is_same_v<InPipeBundleT, FlagBundle<ByteSet<1>>>);
static_assert(std::is_same_v<OutPipeBundleT, FlagBundle<GzipLZ77InputData>>);
BitStreamT bit_stream;
bool last_block = false;
bool done_reading = false;
// Processing consecutive DEFLATE blocks
  // Loop pipelining is disabled here to reduce the memory utilization caused
  // by replicating local variables. Since the inner loop (the
  // while(!block_done) loop) is the main processing loop, disabling
  // pipelining here does not have a significant effect on the throughput
  // of the design.
[[intel::disable_loop_pipelining]] // NO-FORMAT: Attribute
while (!last_block) {
////////////////////////////////////////////////////////////////////////////
// BEGIN: parse the first three bits of the block
auto [last_block_tmp, block_type] =
huffman_decoder_detail::ParseLastBlockAndBlockType<InPipe>(bit_stream);
last_block = last_block_tmp;
bool is_uncompressed_block = (block_type == 0);
bool is_static_huffman_block = (block_type == 1);
bool is_dynamic_huffman_block = (block_type == 2);
// END: parsing the first three bits
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
// BEGIN: parsing the code length table (first table)
ac_uint<9> numlitlencodes;
ac_uint<6> numdistcodes;
ac_uint<5> numcodelencodes;
// These four tables are the first huffman table in optimized form, which
// are used to decode the second Huffman table.
    // They are populated by the ParseFirstTable helper function.
[[intel::fpga_register]] ac_uint<8> codelencode_map_first_code[8];
[[intel::fpga_register]] ac_uint<8> codelencode_map_last_code[8];
[[intel::fpga_register]] ac_uint<5> codelencode_map_base_idx[8];
[[intel::fpga_register]] ac_uint<5> codelencode_map[19];
if (is_dynamic_huffman_block) {
huffman_decoder_detail::ParseFirstTable<InPipe>(
bit_stream, numlitlencodes, numdistcodes, numcodelencodes,
codelencode_map_first_code, codelencode_map_last_code,
codelencode_map_base_idx, codelencode_map);
}
// the number of literal and distance codes is known at compile time for
// static Huffman blocks
if (is_static_huffman_block) {
numlitlencodes = kDeflateStaticNumLitLenCodes;
numdistcodes = kDeflateStaticNumDistCodes;
}
// END: parsing the code length table (first table)
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
// BEGIN: parsing the literal and distance tables (the second table)
    // These 8 tables are the second Huffman table in optimized form, which
    // are used to decode the actual payload data. They are populated by the
    // ParseSecondTable helper function.
[[intel::fpga_register]] ac_uint<15> lit_map_first_code[15];
[[intel::fpga_register]] ac_uint<15> lit_map_last_code[15];
[[intel::fpga_register]] ac_uint<9> lit_map_base_idx[15];
[[intel::fpga_register]] ac_uint<9> lit_map[286];
[[intel::fpga_register]] ac_uint<15> dist_map_first_code[15];
[[intel::fpga_register]] ac_uint<15> dist_map_last_code[15];
[[intel::fpga_register]] ac_uint<5> dist_map_base_idx[15];
[[intel::fpga_register]] ac_uint<5> dist_map[32];
if (is_static_huffman_block || is_dynamic_huffman_block) {
huffman_decoder_detail::ParseSecondTable<InPipe>(
bit_stream, is_static_huffman_block, numlitlencodes, numdistcodes,
numcodelencodes, codelencode_map_first_code,
codelencode_map_last_code, codelencode_map_base_idx, codelencode_map,
lit_map_first_code, lit_map_last_code, lit_map_base_idx, lit_map,
dist_map_first_code, dist_map_last_code, dist_map_base_idx, dist_map);
}
// END: parsing the literal and distance tables (the second table)
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
// BEGIN: decoding the bit stream (main computation loop)
// indicates whether we are reading a distance (or literal) currently
bool reading_distance = false;
    // tracks when a block is done:
    //   for compressed blocks (static and dynamic), done when the stop
    //   code (256) has been decoded
    //   for uncompressed blocks, done when all uncompressed bytes are read
bool block_done = false;
// true when output is ready. the output will be either a character or
// a length distance pair (see HuffmanData struct)
bool out_ready = false;
// the output of this kernel goes to the LZ77 decoder
GzipLZ77InputData out_data;
    // for uncompressed blocks, the first 16 bits (after aligning to a byte
    // boundary) are the length (in bytes) of the uncompressed data, followed
    // by 16 bits holding ~length (for error checking; we will ignore this).
bool parsing_uncompressed_len = true;
ac_uint<2> uncompressed_len_bytes_read = 0;
unsigned short uncompressed_bytes_remaining;
unsigned char first_four_bytes[4];
// if this is an uncompressed block, we must realign to a byte boundary
if (is_uncompressed_block) {
bit_stream.AlignToByteBoundary();
}
// the decoded literal or distance symbol in the main processing loop
ac_uint<9> lit_symbol;
ac_uint<5> dist_symbol;
// The main processing loop.
// The II of this main loop can be controlled from the command line using
    // -DHUFFMAN_II=<desired II>. By default, we let the compiler choose
    // the Fmax/II that maximizes throughput.
    #ifdef HUFFMAN_II
[[intel::initiation_interval(HUFFMAN_II)]] // NO-FORMAT: Attribute
#endif
while (!block_done) {
// read in new data if the ByteBitStream has space for it
if (bit_stream.HasSpaceForByte()) {
bool read_valid;
auto pd = InPipe::read(read_valid);
if (read_valid) {
done_reading = pd.flag;
bit_stream.NewByte(pd.data[0]);
}
}
if (is_uncompressed_block) {
// uncompressed block case
if (bit_stream.Size() >= 8) {
// grab a byte from the bit stream
ac_uint<8> byte = bit_stream.ReadUInt<8>();
if (parsing_uncompressed_len) {
// first 16-bits are length, next 16-bits are ~length
first_four_bytes[uncompressed_len_bytes_read] = byte.to_uint();
if (uncompressed_len_bytes_read == 3) {
// uncompressed_bytes_remaining = uncompressed_len
// we will ignore uncompressed_len_n
uncompressed_bytes_remaining =
(unsigned short)(first_four_bytes[1] << 8) |
(unsigned short)(first_four_bytes[0]);
// done parsing uncompressed length
parsing_uncompressed_len = false;
}
uncompressed_len_bytes_read += 1;
} else {
// for uncompressed blocks, simply read an 8-bit character from the
// stream and write it to the output
out_data.is_literal = true;
out_data.literal[0] = byte;
out_data.valid_count = 1;
out_ready = true;
uncompressed_bytes_remaining--;
block_done = (uncompressed_bytes_remaining == 0);
}
bit_stream.Shift(ac_uint<4>(8));
}
} else if (bit_stream.Size() >= 30) {
        // dynamic or static huffman block, and we know we have at least 30
        // bits, so read the next 30 bits
ac_uint<30> next_bits = bit_stream.ReadUInt<30>();
// find all possible dynamic lengths
[[intel::fpga_register]] ac_uint<5> lit_extra_bit_vals[15][5];
#pragma unroll
for (int out_codelen = 1; out_codelen <= 15; out_codelen++) {
#pragma unroll
for (int in_codelen = 1; in_codelen <= 5; in_codelen++) {
ac_uint<5> codebits(0);
#pragma unroll
for (int bit = 0; bit < in_codelen; bit++) {
codebits[bit] = next_bits[out_codelen + bit] & 0x1;
}
lit_extra_bit_vals[out_codelen - 1][in_codelen - 1] = codebits;
}
}
// find all possible dynamic distances
[[intel::fpga_register]] ac_uint<15> dist_extra_bit_vals[15][15];
#pragma unroll
for (int out_codelen = 1; out_codelen <= 15; out_codelen++) {
#pragma unroll
for (int in_codelen = 1; in_codelen <= 15; in_codelen++) {
ac_uint<15> codebits(0);
#pragma unroll
for (int bit = 0; bit < in_codelen; bit++) {
codebits[bit] = next_bits[out_codelen + bit] & 0x1;
}
dist_extra_bit_vals[out_codelen - 1][in_codelen - 1] = codebits;
}
}
// find all possible code lengths and offsets
// codes are 1 to 15 bits long, so we will do 15 of these computations
// in parallel
ac_uint<15> lit_codelen_valid_bitmap(0);
ac_uint<15> dist_codelen_valid_bitmap(0);
[[intel::fpga_register]] ac_uint<9> lit_codelen_offset[15];
[[intel::fpga_register]] ac_uint<9> lit_codelen_base_idx[15];
[[intel::fpga_register]] ac_uint<5> dist_codelen_offset[15];
[[intel::fpga_register]] ac_uint<5> dist_codelen_base_idx[15];
#pragma unroll
for (unsigned char codelen = 1; codelen <= 15; codelen++) {
// Grab the 'codelen' bits we want and reverse them (little endian)
ac_uint<15> codebits(0);
#pragma unroll
for (unsigned char bit = 0; bit < codelen; bit++) {
codebits[codelen - bit - 1] = next_bits[bit];
}
// for this code length, get the base index, first valid code, and
// last valid code for both the literal and distance table
auto lit_base_idx = lit_map_base_idx[codelen - 1];
auto lit_first_code = lit_map_first_code[codelen - 1];
auto lit_last_code = lit_map_last_code[codelen - 1];
auto dist_base_idx = dist_map_base_idx[codelen - 1];
auto dist_first_code = dist_map_first_code[codelen - 1];
auto dist_last_code = dist_map_last_code[codelen - 1];
// checking a literal match
lit_codelen_valid_bitmap[codelen - 1] =
((codebits >= lit_first_code) && (codebits < lit_last_code)) ? 1
: 0;
lit_codelen_base_idx[codelen - 1] = lit_base_idx;
lit_codelen_offset[codelen - 1] = codebits - lit_first_code;
// checking a distance match
dist_codelen_valid_bitmap[codelen - 1] =
((codebits >= dist_first_code) && (codebits < dist_last_code))
? 1
: 0;
dist_codelen_base_idx[codelen - 1] = dist_base_idx;
dist_codelen_offset[codelen - 1] = codebits - dist_first_code;
}
// find the shortest matching length, which is the next decoded symbol
ac_uint<4> lit_shortest_match_len_idx = CTZ(lit_codelen_valid_bitmap);
ac_uint<4> lit_shortest_match_len =
lit_shortest_match_len_idx + ac_uint<1>(1);
ac_uint<4> dist_shortest_match_len_idx = CTZ(dist_codelen_valid_bitmap);
ac_uint<4> dist_shortest_match_len =
dist_shortest_match_len_idx + ac_uint<1>(1);
// get the base index and offset based on the shortest match length
auto lit_base_idx = lit_codelen_base_idx[lit_shortest_match_len_idx];
auto lit_offset = lit_codelen_offset[lit_shortest_match_len_idx];
auto dist_base_idx = dist_codelen_base_idx[dist_shortest_match_len_idx];
auto dist_offset = dist_codelen_offset[dist_shortest_match_len_idx];
ac_uint<9> lit_idx = lit_base_idx + lit_offset;
ac_uint<9> dist_idx = dist_base_idx + dist_offset;
// lookup the literal (literal or length) and distance using base_idx
// and offset
lit_symbol = lit_map[lit_idx];
dist_symbol = dist_map[dist_idx];
// we will either shift by shortest_match_len or by
// shortest_match_len + num_extra_bits based on whether we read a
// length, distance and/or extra bits.
// maximum value for shift_amount = 15 + 15 = 30, ceil(log2(30)) = 5
ac_uint<5> shift_amount;
if (!reading_distance) {
shift_amount = lit_shortest_match_len;
// currently parsing a symbol or length (same table)
if (lit_symbol == 256) {
// stop code hit, done this block
block_done = true;
} else if (lit_symbol < 256) {
// decoded a literal (i.e. not a length)
out_data.is_literal = true;
out_data.literal[0] = lit_symbol;
out_data.valid_count = 1;
out_ready = true;
} else if (lit_symbol <= 264) {
// decoded a length that doesn't require us to read more bits
out_data.is_literal = false;
out_data.length = lit_symbol - ac_uint<9>(254);
reading_distance = true;
} else if (lit_symbol <= 284) {
// decoded a length that requires us to read more bits
ac_uint<5> lit_symbol_small = lit_symbol.template slc<5>(0);
// (lit_symbol - 261) / 4
ac_uint<3> num_extra_bits = (lit_symbol_small - ac_uint<5>(5)) >> 2;
auto extra_bits_val = lit_extra_bit_vals[lit_shortest_match_len_idx]
[num_extra_bits - 1];
// ((((lit_symbol - 265) % 4) + 4) << num_extra_bits) +
// ac_uint<2>(3) + extra_bits_val
out_data.is_literal = false;
out_data.length =
((((lit_symbol_small - ac_uint<5>(9)) & 0x3) + ac_uint<3>(4))
<< num_extra_bits) +
ac_uint<2>(3) + extra_bits_val;
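            // Worked example (illustrative): lit_symbol = 270 gives
            // num_extra_bits = (270 - 261) / 4 = 2 and, with an extra-bits
            // value of 0, length = ((1 + 4) << 2) + 3 = 23, which is the
            // DEFLATE base length for symbol 270.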
shift_amount = lit_shortest_match_len + num_extra_bits;
reading_distance = true;
} else if (lit_symbol == 285) {
// decoded a length that doesn't require us to read more bits
out_data.is_literal = false;
out_data.length = 258;
reading_distance = true;
} // else error, ignored
} else {
shift_amount = dist_shortest_match_len;
// currently decoding a distance symbol
if (dist_symbol <= 3) {
// decoded a distance that doesn't require us to read more bits
out_data.distance = dist_symbol + ac_uint<1>(1);
} else {
// decoded a distance that requires us to read more bits
ac_uint<4> num_extra_bits = (dist_symbol >> 1) - ac_uint<1>(1);
auto extra_bits_val =
dist_extra_bit_vals[dist_shortest_match_len_idx]
[num_extra_bits - 1];
out_data.distance =
(((dist_symbol & 0x1) + ac_uint<2>(2)) << num_extra_bits) +
ac_uint<1>(1) + extra_bits_val;
shift_amount = dist_shortest_match_len + num_extra_bits;
}
out_ready = true;
reading_distance = false;
}
// shift based on how many bits we read
bit_stream.Shift(shift_amount);
}
// output data to downstream kernel when ready
if (out_ready) {
OutPipe::write(OutPipeBundleT(out_data));
out_ready = false;
}
} // while (!block_done)
} // while (!last_block)
// END: decoding the bit stream (main computation loop)
////////////////////////////////////////////////////////////////////////////
// notify the downstream kernel that we are done
OutPipe::write(OutPipeBundleT(true));
// read out the remaining data from the pipe
// NOTE: don't really care about performance here since it reads out 8 bytes
// for CRC-32 and uncompressed size and maybe one more byte for padding.
while (!done_reading) {
bool read_valid;
auto pd = InPipe::read(read_valid);
done_reading = pd.flag && read_valid;
}
}
//
// Creates a kernel from the Huffman decoder function
//
template <typename Id, typename InPipe, typename OutPipe>
sycl::event SubmitHuffmanDecoder(sycl::queue& q) {
return q.single_task<Id>([=] {
HuffmanDecoder<InPipe, OutPipe>();
});
}
// helper functions for parsing the block headers
namespace huffman_decoder_detail {
//
// Parses the first 3 bits of the DEFLATE block and returns their meaning:
// The first bit indicates whether this is the last block in a stream
// The following two bits indicate the block type:
// 0: uncompressed
// 1: static huffman
// 2: dynamic huffman
// 3: reserved
//
template <typename InPipe>
std::pair<bool, ac_uint<2>> ParseLastBlockAndBlockType(BitStreamT& bit_stream) {
// read in the first byte and add it to the byte bit stream
auto first_pipe_data = InPipe::read();
bit_stream.NewByte(first_pipe_data.data[0]);
// read the first three bits
ac_uint<3> first_three_bits = bit_stream.ReadUInt<3>();
bit_stream.Shift(3);
// first bit indicates whether this is the last block
bool last_block = (first_three_bits.slc<1>(0) == 1);
// next 2 bits indicate the block type
ac_uint<2> block_type = first_three_bits.slc<2>(1);
return std::make_pair(last_block, block_type);
}
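// Worked example (illustrative): if the first byte of a block is 0x05
// (0b101), bit 0 = 1 marks the last block and bits 1..2 = 0b10 = 2 select a
// dynamically compressed block.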
//
// Parses the first Huffman table and creates the optimized Huffman table
// structure
//
template <typename InPipe>
void ParseFirstTable(BitStreamT& bit_stream,
// outputs
ac_uint<9>& numlitlencodes, ac_uint<6>& numdistcodes,
ac_uint<5>& numcodelencodes,
ac_uint<8> codelencode_map_first_code[8],
ac_uint<8> codelencode_map_last_code[8],
ac_uint<5> codelencode_map_base_idx[8],
ac_uint<5> codelencode_map[19]) {
ac_uint<2> first_table_state = 0;
unsigned short codelencodelen_count = 0;
// shuffle vector for the first table
constexpr unsigned short codelencodelen_idxs[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
[[intel::fpga_register]] ac_uint<3> codelencodelen[19];
#pragma unroll
for (int i = 0; i < 19; i++) {
codelencodelen[i] = 0;
}
bool parsing = true;
// NOTE: this loop is not the main processing loop and therefore is
// not critical (low trip count). However, the compiler doesn't know that
// and tries to optimize for throughput (~Fmax/II). We don't want
// this loop to be our Fmax bottleneck, so increase the II.
[[intel::initiation_interval(3)]] // NO-FORMAT: Attribute
while (parsing) {
// grab a byte if we have space for it
if (bit_stream.HasSpaceForByte()) {
bool read_valid;
auto pd = InPipe::read(read_valid);
if (read_valid) {
unsigned char c = pd.data[0];
bit_stream.NewByte(c);
}
}
// make sure we have enough bits (in the maximum case)
if (bit_stream.Size() >= 5) {
if (first_table_state == 0) {
// read the number of literal length codes
numlitlencodes = bit_stream.ReadUInt(5) + ac_uint<9>(257);
bit_stream.Shift(5);
first_table_state = 1;
} else if (first_table_state == 1) {
// read the number of distance length codes
numdistcodes = bit_stream.ReadUInt(5) + ac_uint<1>(1);
bit_stream.Shift(5);
first_table_state = 2;
} else if (first_table_state == 2) {
// read the number of code length codes (for encoding code lengths)
numcodelencodes = bit_stream.ReadUInt(4) + ac_uint<3>(4);
bit_stream.Shift(4);
first_table_state = 3;
} else if (codelencodelen_count < numcodelencodes) {
// read the code lengths themselves
auto tmp = bit_stream.ReadUInt(3);
bit_stream.Shift(3);
codelencodelen[codelencodelen_idxs[codelencodelen_count]] = tmp;
codelencodelen_count++;
parsing = (codelencodelen_count != numcodelencodes);
}
}
}
ac_uint<8> codelencode_map_next_code = 0;
ac_uint<5> codelencode_map_counter = 0;
for (unsigned char codelen = 1; codelen <= 8; codelen++) {
codelencode_map_next_code <<= 1;
codelencode_map_first_code[codelen - 1] = codelencode_map_next_code;
codelencode_map_base_idx[codelen - 1] = codelencode_map_counter;
for (unsigned short symbol = 0; symbol < 19; symbol++) {
auto inner_codelen = codelencodelen[symbol];
if (inner_codelen == codelen) {
codelencode_map[codelencode_map_counter] = symbol;
codelencode_map_counter++;
codelencode_map_next_code++;
}
}
codelencode_map_last_code[codelen - 1] = codelencode_map_next_code;
}
}
//
// Parses the second Huffman table and creates the optimized Huffman table
// structure
//
template <typename InPipe>
void ParseSecondTable(
BitStreamT& bit_stream, bool is_static_huffman_block,
ac_uint<9> numlitlencodes, ac_uint<6> numdistcodes,
ac_uint<5> numcodelencodes, ac_uint<8> codelencode_map_first_code[8],
ac_uint<8> codelencode_map_last_code[8],
ac_uint<5> codelencode_map_base_idx[8], ac_uint<5> codelencode_map[19],
// outputs
ac_uint<15> lit_map_first_code[15], ac_uint<15> lit_map_last_code[15],
ac_uint<9> lit_map_base_idx[15], ac_uint<9> lit_map[286],
ac_uint<15> dist_map_first_code[15], ac_uint<15> dist_map_last_code[15],
ac_uint<5> dist_map_base_idx[15], ac_uint<5> dist_map[32]) {
  // the maximum length of codelens is MAX(numlitlencodes) + MAX(numdistcodes)
  // = (2^5 + 257) + (2^5 + 1) = 322
// maximum code length = 15, so requires 4 bits to store
ac_uint<4> codelens[322];
ac_uint<9> total_codes_second_table = numlitlencodes + numdistcodes;
decltype(total_codes_second_table) codelens_idx = 0;
bool decoding_next_symbol = true;
ac_uint<8> runlen; // MAX = (2^7 + 11)
int onecount = 0, otherpositivecount = 0;
ac_uint<4> extend_symbol;
// static codelens ROM (for static huffman encoding)
constexpr auto static_codelens = [] {
std::array<unsigned short, 320> a{};
// literal codes
for (int i = 0; i < kDeflateStaticNumLitLenCodes; i++) {
if (i < 144) {
a[i] = 8;
} else if (i < 144 + 112) {
a[i] = 9;
} else if (i < 144 + 112 + 24) {
a[i] = 7;
} else {
a[i] = 8;
}
}
// distance codes
for (int i = 0; i < kDeflateStaticNumDistCodes; i++) {
a[kDeflateStaticNumLitLenCodes + i] = 5;
}
return a;
}();
if (is_static_huffman_block) {
// for a static huffman block, initialize codelens with static codelens ROM
for (int i = 0; i < kDeflateStaticTotalCodes; i++) {
codelens[i] = static_codelens[i];
}
} else { // is_dynamic_huffman_block
// NOTE: this loop is not the main processing loop and therefore is
// not critical (low trip count). However, the compiler doesn't know that
// and tries to optimize for throughput (~Fmax/II). We don't want
// this loop to be our Fmax bottleneck, so increase the II.
[[intel::initiation_interval(3)]] // NO-FORMAT: Attribute
while (codelens_idx < total_codes_second_table) {
// read in another byte if we have space for it
if (bit_stream.HasSpaceForByte()) {
bool read_valid;
auto pd = InPipe::read(read_valid);
if (read_valid) {
bit_stream.NewByte(pd.data[0]);
}
}
if (decoding_next_symbol) {
        // decoding the next code symbol, so make sure we have enough bits to
        // do so: 15 bits is the maximum needed to read both a symbol and the
        // extra run length bits (max 8 bits for the symbol, max 7 bits for
        // the extra run length)
if (bit_stream.Size() >= 15) {
// read 15 bits
ac_uint<15> next_bits = bit_stream.ReadUInt<15>();
// find all possible dynamic run lengths
// the symbol could be from 1 to 8 bits long and the number of extra
// bits to read for the run length could be either 2, 3, or 7 bits
// (the 3 possibilities in 'runlen_bits').
[[intel::fpga_register]] ac_uint<7> extra_bit_vals[8][3];
constexpr int runlen_bits[] = {2, 3, 7};
#pragma unroll
for (int out_codelen = 1; out_codelen <= 8; out_codelen++) {
#pragma unroll
for (int j = 0; j < 3; j++) {
ac_uint<7> codebits(0);
#pragma unroll
for (int bit = 0; bit < runlen_bits[j]; bit++) {
codebits[bit] = next_bits[out_codelen + bit] & 0x1;
}
extra_bit_vals[out_codelen - 1][j] = codebits;
}
}
// decode all possible code symbols from 1 to 8 bits
ac_uint<8> codelencode_valid_bitmap(0);
[[intel::fpga_register]] ac_uint<5> codelencode_offset[8];
[[intel::fpga_register]] ac_uint<5> codelencode_base_idx[8];
#pragma unroll
for (int codelen = 1; codelen <= 8; codelen++) {
ac_uint<8> codebits(0);
#pragma unroll
for (int bit = 0; bit < codelen; bit++) {
codebits[codelen - bit - 1] = next_bits[bit] & 0x1;
}
auto base_idx = codelencode_map_base_idx[codelen - 1];
auto first_code = codelencode_map_first_code[codelen - 1];
auto last_code = codelencode_map_last_code[codelen - 1];
codelencode_base_idx[codelen - 1] = base_idx;
codelencode_valid_bitmap[codelen - 1] =
((codebits >= first_code) && (codebits < last_code)) ? 1 : 0;
codelencode_offset[codelen - 1] = codebits - first_code;
}
// find the shortest matching code symbol
ac_uint<3> shortest_match_len_idx = CTZ(codelencode_valid_bitmap);
ac_uint<3> shortest_match_len =
shortest_match_len_idx + ac_uint<1>(1);
ac_uint<5> base_idx = codelencode_base_idx[shortest_match_len_idx];
ac_uint<5> offset = codelencode_offset[shortest_match_len_idx];
// get the decoded symbol
auto symbol = codelencode_map[base_idx + offset];
// max shift amount will be 15 (8 bits for symbol, 7 for run length)
ac_uint<4> shift_amount;
// do logic based on symbol value
if (symbol <= 15) {
// ADD SYMBOL
codelens[codelens_idx++] = symbol;
decoding_next_symbol = true;
if (codelens_idx >= numlitlencodes) {
if (symbol == 1) {
onecount++;
} else if (symbol > 0) {
otherpositivecount++;
}
}
shift_amount = shortest_match_len;
} else if (symbol == 16) {
// READ 2-BIT RUN LENGTH, ADD 3, AND EXTEND LAST ELEMENT
runlen = extra_bit_vals[shortest_match_len_idx][0] + ac_uint<2>(3);
decoding_next_symbol = false;
extend_symbol = codelens[codelens_idx - 1];
shift_amount = shortest_match_len + ac_uint<2>(2);
} else if (symbol == 17) {
// READ 3-BIT RUN LENGTH, ADD 3, AND EXTEND WITH 0's
runlen = extra_bit_vals[shortest_match_len_idx][1] + ac_uint<2>(3);
decoding_next_symbol = false;
extend_symbol = 0;
shift_amount = shortest_match_len + ac_uint<2>(3);
} else if (symbol == 18) {
// READ 7-BIT RUN LENGTH, ADD 11, AND EXTEND WITH 0's
runlen = extra_bit_vals[shortest_match_len_idx][2] + ac_uint<4>(11);
decoding_next_symbol = false;
extend_symbol = 0;
shift_amount = shortest_match_len + ac_uint<3>(7);
}
// shift the bit stream
bit_stream.Shift(shift_amount);
}
} else {
// extending codelens
codelens[codelens_idx++] = extend_symbol;
if (codelens_idx >= numlitlencodes) {
if (extend_symbol == 1) {
onecount++;
} else if (extend_symbol > 0) {
otherpositivecount++;
}
}
// decrement the run length
runlen--;
        // start decoding symbols again when runlen == 0
decoding_next_symbol = (runlen == 0);
}
}
    // handle the case where only one distance code is defined: add a dummy
    // invalid code to make the Huffman tree complete
if (onecount == 1 && otherpositivecount == 0) {
int extend_amount = 32 - numdistcodes;
for (int i = 0; i < extend_amount; i++) {
codelens[numlitlencodes + numdistcodes + i] = 0;
}
codelens[numlitlencodes + 31] = 1;
numdistcodes += extend_amount;
}
}
// the first table is decoded, so now it is time to decode the second
// table, which is actually two tables:
// literal table (symbols and lengths for the {length, distance} pair)
// distance table (the distances for the {length, distance} pair)
//
// NOTE ON OPTIMIZATION: The DEFLATE algorithm defines the Huffman table
// by just storing the number of bits for each symbol. This method to
// convert from a list of code lengths to the Huffman table is such that
// codes of the same length (in bits) are sequential.
// Since the range of code length bits is [1, 15] bits, we can
// efficiently store the huffman table as follows:
// - Store the first and last code for each code length.
// - Store a map from the index to symbol in a map (lit_map and dist_map)
// - Store the base index for each code length into the map
// Then, given N bits, with a value of V, you can check for a match like so:
// bool match = (V >= first_code[N]) && (V < last_code[N]);
// And, if we get a match for this code, we can get the symbol like so:
// int offset = V - first_code[N];
// symbol = map[base_idx[N] + offset];
//
// This structure is the same for both the 'lit_map' and 'dist_map' below,
// the only difference being there are 286 possibilities for literals
// and 32 for distances. When decoding, the presence of a 'length' literal
// implies the next thing we decode is a distance.
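  //
  // Worked example (illustrative): with code lengths {A:2, B:2, C:2, D:3,
  // E:3}, the canonical codes are A=00, B=01, C=10, D=110, E=111, so
  // first_code[2]=0, last_code[2]=3, first_code[3]=6, last_code[3]=8.
  // Decoding the bits 110: the first 2 bits give V=3, which fails the match
  // check for N=2 (3 < 3 is false); 3 bits give V=6, which matches for N=3,
  // and offset = 6 - 6 = 0 selects symbol D.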
// the maximum of numdistcodes and numlitlencodes
ac_uint<9> max_codes;
if (numdistcodes < numlitlencodes)
max_codes = numlitlencodes;
else
max_codes = numdistcodes;
ac_uint<15> lit_map_next_code = 0;
ac_uint<9> lit_map_counter = 0;
ac_uint<15> dist_map_next_code = 0;
ac_uint<5> dist_map_counter = 0;
for (unsigned char codelen = 1; codelen <= 15; codelen++) {
lit_map_next_code <<= 1;
lit_map_first_code[codelen - 1] = lit_map_next_code;
lit_map_base_idx[codelen - 1] = lit_map_counter;
dist_map_next_code <<= 1;
dist_map_first_code[codelen - 1] = dist_map_next_code;
dist_map_base_idx[codelen - 1] = dist_map_counter;
// this loop has been manually fused for the literal and distance codes
// we will iterate max(numdistcodes, numlitlencodes) times and predicate
// the decoding based on numdistcodes and numlitlencodes
for (unsigned short symbol = 0; symbol < max_codes; symbol++) {
// literal
if (symbol < numlitlencodes) {
auto inner_codelen = codelens[symbol];
if (inner_codelen == codelen) {
lit_map[lit_map_counter] = symbol;
lit_map_counter++;
lit_map_next_code++;
}
}
// distance
if (symbol < numdistcodes) {
auto inner_codelen = codelens[numlitlencodes + symbol];
if (inner_codelen == codelen) {
dist_map[dist_map_counter] = symbol;
dist_map_counter++;
dist_map_next_code++;
}
}
}
lit_map_last_code[codelen - 1] = lit_map_next_code;
dist_map_last_code[codelen - 1] = dist_map_next_code;
}
}
} // namespace huffman_decoder_detail
#endif /* __HUFFMAN_DECODER_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/gzip/gzip_decompressor.hpp | #ifndef __GZIP_DECOMPRESSOR_HPP__
#define __GZIP_DECOMPRESSOR_HPP__
#include <sycl/sycl.hpp>
#include <chrono>
#include <cstring>
#include <numeric>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../common/byte_stacker.hpp"
#include "../common/common.hpp"
#include "../common/lz77_decoder.hpp"
#include "../common/simple_crc32.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "gzip_metadata_reader.hpp"
#include "huffman_decoder.hpp"
#include "metaprogramming_utils.hpp" // included from ../../../../include
// declare the kernel and pipe names globally to reduce name mangling
class GzipMetadataReaderKernelID;
class HuffmanDecoderKernelID;
class LZ77DecoderKernelID;
class ByteStackerKernelID;
class GzipMetadataToHuffmanPipeID;
class HuffmanToLZ77PipeID;
class LZ77ToByteStackerPipeID;
// the depth of the pipe between the Huffman decoder and the LZ77 decoder.
// adding some extra depth here adds elasticity so that the Huffman decoder
// can keep computing while the LZ77 kernel reads from the history buffer
constexpr int kHuffmanToLZ77PipeDepth = 64;
//
// Submits the kernels for the GZIP decompression engine and returns a list of
// SYCL events from each kernel launch.
//
// Template parameters:
// InPipe: the input pipe that streams in compressed data, 1 byte at a time
// OutPipe: the output pipe that streams out decompressed data,
// 'literals_per_cycle' at a time
// literals_per_cycle: the maximum number of literals written to the output
// stream every cycle. This sets how many literals can be read from the
// LZ77 history buffer at once.
//
// Arguments:
// q: the SYCL queue
// in_count: the number of compressed bytes
//   hdr_data_out: an output buffer for the GZIP header data
// crc_out: an output buffer for the CRC in the GZIP footer
// count_out: an output buffer for the uncompressed size in the GZIP footer
//
template <typename InPipe, typename OutPipe, unsigned literals_per_cycle>
std::vector<sycl::event> SubmitGzipDecompressKernels(
sycl::queue &q, int in_count, GzipHeaderData *hdr_data_out, int *crc_out,
int *count_out) {
// check that the input and output pipe types are actually pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// 'literals_per_cycle' must be greater than 0 and a power of 2
static_assert(literals_per_cycle > 0);
static_assert(fpga_tools::IsPow2(literals_per_cycle));
// the inter-kernel pipes for the GZIP decompression engine
using GzipMetadataToHuffmanPipe =
sycl::ext::intel::pipe<GzipMetadataToHuffmanPipeID,
FlagBundle<ByteSet<1>>>;
using HuffmanToLZ77Pipe =
sycl::ext::intel::pipe<HuffmanToLZ77PipeID, FlagBundle<GzipLZ77InputData>,
kHuffmanToLZ77PipeDepth>;
// submit the GZIP decompression kernels
auto header_event =
SubmitGzipMetadataReader<GzipMetadataReaderKernelID, InPipe,
GzipMetadataToHuffmanPipe>(
q, in_count, hdr_data_out, crc_out, count_out);
auto huffman_event =
SubmitHuffmanDecoder<HuffmanDecoderKernelID, GzipMetadataToHuffmanPipe,
HuffmanToLZ77Pipe>(q);
// the design only needs a ByteStacker kernel when literals_per_cycle > 1
if constexpr (literals_per_cycle > 1) {
using LZ77ToByteStackerPipe =
sycl::ext::intel::pipe<LZ77ToByteStackerPipeID,
FlagBundle<BytePack<literals_per_cycle>>>;
auto lz77_event =
SubmitLZ77Decoder<LZ77DecoderKernelID, HuffmanToLZ77Pipe,
LZ77ToByteStackerPipe, literals_per_cycle,
kGzipMaxLZ77Distance, kGzipMaxLZ77Length>(q);
auto byte_stacker_event =
SubmitByteStacker<ByteStackerKernelID, LZ77ToByteStackerPipe, OutPipe,
literals_per_cycle>(q);
return {header_event, huffman_event, lz77_event, byte_stacker_event};
} else {
auto lz77_event =
SubmitLZ77Decoder<LZ77DecoderKernelID, HuffmanToLZ77Pipe, OutPipe,
literals_per_cycle, kGzipMaxLZ77Distance,
kGzipMaxLZ77Length>(q);
return {header_event, huffman_event, lz77_event};
}
}
// declare kernel and pipe names at the global scope to reduce name mangling
class ProducerId;
class ConsumerId;
class InPipeId;
class OutPipeId;
// the input and output pipe
using InPipe = sycl::ext::intel::pipe<InPipeId, ByteSet<1>>;
using OutPipe =
sycl::ext::intel::pipe<OutPipeId, FlagBundle<BytePack<kLiteralsPerCycle>>>;
//
// The GZIP decompressor. See ../common/common.hpp for more information.
//
template <unsigned literals_per_cycle>
class GzipDecompressor : public DecompressorBase {
public:
std::optional<std::vector<unsigned char>> DecompressBytes(
sycl::queue &q, std::vector<unsigned char> &in_bytes, int runs,
bool print_stats) {
int in_count = in_bytes.size();
// read the expected output size from the last 4 bytes of the file
    std::vector<unsigned char> last_4_bytes(in_bytes.end() - 4, in_bytes.end());
    // use memcpy instead of a reinterpret_cast to avoid strict-aliasing and
    // alignment issues
    unsigned out_count;
    std::memcpy(&out_count, last_4_bytes.data(), sizeof(out_count));
std::vector<unsigned char> out_bytes(out_count);
// round up the output count to the nearest multiple of literals_per_cycle,
// which allows us to not predicate the last writes to the output buffer
// from the device.
int out_count_padded =
fpga_tools::RoundUpToMultiple(out_count, literals_per_cycle);
// the GZIP header data. This is parsed by the GZIPMetadataReader kernel
GzipHeaderData hdr_data_h;
// the GZIP footer data. This is parsed by the GZIPMetadataReader kernel.
unsigned int crc_h, count_h;
// track timing information in ms
std::vector<double> time_ms(runs);
// input and output data pointers on the device using USM device allocations
unsigned char *in, *out;
// the GZIP header data (see gzip_header_data.hpp)
GzipHeaderData *hdr_data;
// the GZIP footer data, where 'count' is the expected number of bytes
// in the uncompressed file
int *crc, *count;
bool passed = true;
try {
#if defined (IS_BSP)
// allocate memory on the device
if ((in = sycl::malloc_device<unsigned char>(in_count, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'in'\n";
std::terminate();
}
if ((out = sycl::malloc_device<unsigned char>(out_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'out'\n";
std::terminate();
}
if ((hdr_data = sycl::malloc_device<GzipHeaderData>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'hdr_data'\n";
std::terminate();
}
if ((crc = sycl::malloc_device<int>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'crc'\n";
std::terminate();
}
if ((count = sycl::malloc_device<int>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'count'\n";
std::terminate();
}
#else
// allocate shared memory
if ((in = sycl::malloc_shared<unsigned char>(in_count, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'in'\n";
std::terminate();
}
if ((out = sycl::malloc_shared<unsigned char>(out_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'out'\n";
std::terminate();
}
if ((hdr_data = sycl::malloc_shared<GzipHeaderData>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'hdr_data'\n";
std::terminate();
}
if ((crc = sycl::malloc_shared<int>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'crc'\n";
std::terminate();
}
if ((count = sycl::malloc_shared<int>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'count'\n";
std::terminate();
}
#endif
// copy the input data to the device memory and wait for the copy to
// finish
q.memcpy(in, in_bytes.data(), in_count * sizeof(unsigned char)).wait();
// run the design multiple times to increase the accuracy of the timing
for (int i = 0; i < runs; i++) {
std::cout << "Launching kernels for run " << i << std::endl;
auto producer_event =
SubmitProducer<ProducerId, InPipe, 1>(q, in_count, in);
auto consumer_event =
SubmitConsumer<ConsumerId, OutPipe, literals_per_cycle>(
q, out_count_padded, out);
auto gzip_decompress_events =
SubmitGzipDecompressKernels<InPipe, OutPipe, literals_per_cycle>(
q, in_count, hdr_data, crc, count);
auto s = std::chrono::high_resolution_clock::now();
producer_event.wait();
consumer_event.wait();
auto e = std::chrono::high_resolution_clock::now();
// wait for the decompression kernels to finish
for (auto &ev : gzip_decompress_events) {  // 'ev' avoids shadowing 'e' above
  ev.wait();
}
std::cout << "All kernels have finished for run " << i << std::endl;
// duration in milliseconds
time_ms[i] = std::chrono::duration<double, std::milli>(e - s).count();
// Copy the output back from the device
q.memcpy(out_bytes.data(), out, out_count * sizeof(unsigned char))
.wait();
q.memcpy(&hdr_data_h, hdr_data, sizeof(GzipHeaderData)).wait();
q.memcpy(&crc_h, crc, sizeof(int)).wait();
q.memcpy(&count_h, count, sizeof(int)).wait();
// validating the output
// check the magic header we read
if (hdr_data_h.MagicNumber() != 0x1f8b) {
auto save_flags = std::cerr.flags();
std::cerr << "ERROR: Incorrect magic header value of 0x" << std::hex
<< std::setw(4) << std::setfill('0')
<< hdr_data_h.MagicNumber() << " (should be 0x1f8b)\n";
std::cerr.flags(save_flags);
passed = false;
}
// check the number of bytes we read
if (count_h != out_count) {
std::cerr << "ERROR: Out counts do not match: " << count_h
<< " != " << out_count << "(count_h != out_count)\n";
passed = false;
}
// compute the CRC of the output data
auto crc32_out = SimpleCRC32(0, out_bytes.data(), out_count);
// check that the computed CRC matches the expectation (crc_h is the
// CRC-32 that is in the GZIP footer).
if (crc32_out != crc_h) {
  auto save_flags = std::cerr.flags();
  std::cerr << std::hex << std::setw(4) << std::setfill('0');
  std::cerr << "ERROR: output data CRC does not match the expected CRC "
            << "0x" << crc32_out << " != 0x" << crc_h
            << " (result != expected)\n";
  std::cerr.flags(save_flags);
  passed = false;
}
}
}
} catch (sycl::exception const &e) {
std::cout << "Caught a synchronous SYCL exception: " << e.what() << "\n";
std::terminate();
}
// free the allocated device memory
sycl::free(in, q);
sycl::free(out, q);
sycl::free(hdr_data, q);
sycl::free(crc, q);
sycl::free(count, q);
// print the performance results
if (passed && print_stats) {
// NOTE: when run in emulation, these results do not accurately represent
// the performance of the kernels on real FPGA hardware
double avg_time_ms;
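// when averaging, exclude the first run as a warm-up: it can include
// one-time overheads (e.g., device/runtime initialization) that would skew
// the average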
if (runs > 1) {
avg_time_ms = std::accumulate(time_ms.begin() + 1, time_ms.end(), 0.0) /
(runs - 1);
} else {
avg_time_ms = time_ms[0];
}
double compression_ratio = (double)(count_h) / (double)(in_count);
// the number of output megabytes
double out_mb = count_h * sizeof(unsigned char) * 1e-6;
std::cout << "Execution time: " << avg_time_ms << " ms\n";
std::cout << "Output Throughput: " << (out_mb / (avg_time_ms * 1e-3))
<< " MB/s\n";
std::cout << "Compression Ratio: " << compression_ratio << ":1"
<< "\n";
}
if (passed) {
return out_bytes;
} else {
return {};
}
}
};
#endif /* __GZIP_DECOMPRESSOR_HPP__ */
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/snappy/snappy_data_gen.hpp | #ifndef __SNAPPY_DATA_GEN_HPP__
#define __SNAPPY_DATA_GEN_HPP__
#include <algorithm>
#include <exception>
#include <iostream>
#include <vector>
//
// A function to generate compressed Snappy data for testing purposes.
// Generates a file as follows:
//   'num_lit_strs' literal strings of length 'lit_str_len'
//   'num_copies' copies of length 'copy_len' and offset
//      min(16383, lit_str_len - 1)
//   'repeats' repetitions of the above.
//
std::vector<unsigned char> GenerateSnappyCompressedData(unsigned lit_str_len,
unsigned num_lit_strs,
unsigned copy_len,
unsigned num_copies,
unsigned repeats) {
// error checking the input arguments
if (lit_str_len <= 0) {
std::cerr << "ERROR: 'lit_str_len' must be greater than 0" << std::endl;
std::terminate();
}
if (num_lit_strs <= 0) {
std::cerr << "ERROR: 'num_lit_strs' must be greater than 0" << std::endl;
std::terminate();
}
if (copy_len > 64) {
std::cerr << "ERROR: 'copy_len' must be less than or equal to 64"
<< std::endl;
std::terminate();
}
if (repeats <= 0) {
std::cerr << "ERROR: 'repeats' must be greater than 0" << std::endl;
std::terminate();
}
if (num_copies > 0 && copy_len <= 0) {
std::cerr << "ERROR: if 'num_copies' is non-zero, then 'copy_len' must be "
<< "greater than 0" << std::endl;
std::terminate();
}
// the expected uncompressed length
unsigned uncompressed_length =
(lit_str_len * num_lit_strs + copy_len * num_copies) * repeats;
std::vector<unsigned char> ret;
// the "smart" data we will fill our dummy buffer with ... ;)
constexpr unsigned char dummy_alphabet[] = {'I', 'N', 'T', 'E', 'L'};
constexpr unsigned dummy_alphabet_count =
sizeof(dummy_alphabet) / sizeof(dummy_alphabet[0]);
// lambda to convert an unsigned int to a byte array
auto unsigned_to_byte_array = [](unsigned val) {
std::vector<unsigned char> arr(4);
for (int i = 0; i < 4; i++) {
arr[i] = (val >> (i * 8)) & 0xFF;
}
return arr;
};
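// e.g., unsigned_to_byte_array(0x11223344) yields the little-endian byte
// sequence {0x44, 0x33, 0x22, 0x11}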
// generate the preamble: the uncompressed length varint
// see the README for more information on what a varint is
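// worked example: an uncompressed length of 300 (0b1'0010'1100) encodes as
// two varint bytes: 0xAC (the low 7 bits, 0x2C, with the continuation bit
// set) followed by 0x02 (the remaining bits, continuation bit clear)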
unsigned uncompressed_length_bytes = 0;
unsigned uncompressed_length_varint = 0;
while (uncompressed_length != 0) {
auto data = uncompressed_length & 0x7F;
auto uncompressed_length_next = uncompressed_length >> 7;
unsigned more_bytes = (uncompressed_length_next != 0) ? 1 : 0;
auto mask = (more_bytes << 7) | data;
uncompressed_length_varint |= mask << (uncompressed_length_bytes * 8);
uncompressed_length_bytes++;
uncompressed_length = uncompressed_length_next;
}
// error check the result of converting the uncompressed length to a varint
// (the varint is packed into a 32-bit 'unsigned' below, so lengths needing
// 5 varint bytes, i.e. >= 2^28, are not supported by this generator)
if (uncompressed_length_bytes > 4) {
  std::cerr << "ERROR: generating the preamble, uncompressed_length_bytes = "
            << uncompressed_length_bytes << "\n";
  std::terminate();
}
if (uncompressed_length_varint <= 0) {
std::cerr << "ERROR: generating the preamble, uncompressed_length_varint = "
<< uncompressed_length_varint << "\n";
std::terminate();
}
// convert the varint to an array of bytes and add them to the output
auto uncompressed_length_varint_bytes =
unsigned_to_byte_array(uncompressed_length_varint);
for (int i = 0; i < uncompressed_length_bytes; i++) {
ret.push_back(uncompressed_length_varint_bytes[i]);
}
// determine the literal string and copy tag byte once, since it won't change
// across the 'repeats' iterations of the loop to generate the data
constexpr unsigned char lit_str_tag = 0;
unsigned lit_str_byte_count = 0;
unsigned char lit_str_bytes[5];
if (lit_str_len <= 60) {
// write the literal string tag byte
lit_str_bytes[0] = ((lit_str_len - 1) << 2) | lit_str_tag;
lit_str_byte_count = 1;
} else {
// how many bytes are needed to store the literal length
unsigned lit_str_extra_byte_count = 1;
while ((1 << (lit_str_extra_byte_count * 8)) < lit_str_len) {
lit_str_extra_byte_count += 1;
}
// store the tag byte
auto length_bytes_mask = 60 + lit_str_extra_byte_count - 1;
lit_str_bytes[0] = (length_bytes_mask << 2) | lit_str_tag;
// store the extra bytes
auto lit_len_byte_array = unsigned_to_byte_array(lit_str_len - 1);
for (int j = 0; j < lit_str_extra_byte_count; j++) {
lit_str_bytes[j + 1] = lit_len_byte_array[j];
}
lit_str_byte_count = lit_str_extra_byte_count + 1;
}
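// examples of the tag byte logic above (illustrative values):
//   lit_str_len = 10  -> one tag byte: ((10 - 1) << 2) | 0 = 0x24
//   lit_str_len = 100 -> tag byte 0xF0 (mask 60 for 1 extra length byte)
//                        followed by the length byte 99 = 0x63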
// generate the compressed data
for (int i = 0; i < repeats; i++) {
// literal strings
for (int j = 0; j < num_lit_strs; j++) {
// write the literal tag byte and optional extra bytes for the length
for (int k = 0; k < lit_str_byte_count; k++) {
ret.push_back(lit_str_bytes[k]);
}
// write the literals following the literal tag byte
for (int k = 0; k < lit_str_len; k++) {
ret.push_back(dummy_alphabet[k % dummy_alphabet_count]);
}
}
// copies
for (int j = 0; j < num_copies; j++) {
// the copy tag byte (always 2 byte copies)
constexpr unsigned char copy_tag_type = 2;
unsigned char tag_byte = ((copy_len - 1) << 2) | copy_tag_type;
ret.push_back(tag_byte);
// the extra 2 bytes for the offset, emitted low byte first (Snappy stores
// 2-byte copy offsets as little-endian, which is also how the reader
// kernel decodes them)
unsigned offset = std::min(16383U, lit_str_len - 1);
auto offset_bytes = unsigned_to_byte_array(offset);
ret.push_back(offset_bytes[0]);
ret.push_back(offset_bytes[1]);
}
}
return ret;
}
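// Illustrative usage (hypothetical values; any combination that passes the
// error checks above works):
//   auto data = GenerateSnappyCompressedData(/*lit_str_len=*/128,
//                                            /*num_lit_strs=*/4,
//                                            /*copy_len=*/32,
//                                            /*num_copies=*/2,
//                                            /*repeats=*/1);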
#endif /* __SNAPPY_DATA_GEN_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/snappy/byte_stream.hpp | #ifndef __BYTE_STREAM_HPP__
#define __BYTE_STREAM_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../common/common.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
//
// A stream of bytes with capacity 'num_bytes'. This class allows multiple
// bytes to be read/consumed at once (0 to 'max_shift') and multiple bytes
// to be written in at once.
//
// Template parameters:
// num_bytes: the number of bytes to store in the stream
// max_shift: the maximum number of bytes consumed per call to
// Shift(ShiftCountT)
//
template <int num_bytes, int max_shift>
class ByteStream {
// static asserts to check the template arguments
static_assert(num_bytes > 0);
static_assert(max_shift > 0);
static_assert(max_shift <= num_bytes);
// the number of bits used to count from 0 to 'num_bytes', inclusive
static constexpr int count_bits = fpga_tools::Log2(num_bytes) + 1;
using CountT = ac_uint<count_bits>;
// the number of bits used to count from 0 to 'max_shift', inclusive
static constexpr int shift_count_bits = fpga_tools::Log2(max_shift) + 1;
using ShiftCountT = ac_uint<shift_count_bits>;
public:
ByteStream() : count_(0), space_(num_bytes) {}
auto Count() { return count_; }
auto Space() { return space_; }
//
// write in a new byte
//
void Write(const unsigned char& b) {
data_[count_] = b;
count_ += decltype(count_)(1);
space_ -= decltype(space_)(1);
}
//
// write in 'write_n' new bytes
//
template <size_t write_n>
void Write(const ByteSet<write_n>& b) {
static_assert(write_n < num_bytes);
#pragma unroll
for (int i = 0; i < write_n; i++) {
data_[count_ + i] = b.byte[i];
}
count_ += decltype(count_)(write_n);
space_ -= decltype(space_)(write_n);
}
//
// write in 'write_n' new bytes
//
template <size_t write_n>
void Write(const BytePack<write_n>& b) {
static_assert(write_n < num_bytes);
#pragma unroll
for (int i = 0; i < write_n; i++) {
if (i < b.valid_count) {
data_[count_ + i] = b.byte[i];
}
}
count_ += decltype(count_)(write_n);
space_ -= decltype(space_)(write_n);
}
//
// read the first element
//
auto Read() const { return data_[0]; }
//
// read the first 'read_n' elements
template <int read_n>
auto Read() const {
ByteSet<read_n> ret;
#pragma unroll
for (int i = 0; i < read_n; i++) {
ret.byte[i] = data_[i];
}
return ret;
}
//
// shift the stream by 1 element
//
void Shift() {
#pragma unroll
for (int i = 0; i < num_bytes - 1; i++) {
data_[i] = data_[i + 1];
}
count_ -= decltype(count_)(1);
space_ += decltype(space_)(1);
}
//
// shift the stream by 's' elements
//
template <int s>
void Shift() {
static_assert(s <= num_bytes);
static_assert(s <= max_shift);
#pragma unroll
for (int i = 0; i < num_bytes - s; i++) {
data_[i] = data_[i + s];
}
count_ -= decltype(count_)(s);
space_ += decltype(space_)(s);
}
//
// shift the stream by 's' elements
//
void Shift(ShiftCountT s) {
#pragma unroll
for (int i = 0; i < num_bytes - 1; i++) {
// by adding 'max_shift' extra elements to 'data_', we can avoid adding
// the 'if (s + i < num_bytes)' condition here
data_[i] = data_[i + s];
}
count_ -= decltype(count_)(s);
space_ += decltype(space_)(s);
}
private:
unsigned char data_[num_bytes + max_shift];
CountT count_, space_;
};
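// Illustrative usage sketch (host-side, not part of the decompressor design):
//   ByteStream<8, 4> bs;
//   bs.Write('a');                // bs.Count() == 1
//   bs.Write('b');                // bs.Count() == 2
//   unsigned char c = bs.Read();  // peeks 'a' without consuming it
//   bs.Shift();                   // consumes 'a'; bs.Count() == 1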
#endif /* __BYTE_STREAM_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/snappy/snappy_decompressor.hpp | #ifndef __SNAPPY_DECOMPRESSOR_HPP__
#define __SNAPPY_DECOMPRESSOR_HPP__
#include <sycl/sycl.hpp>
#include <chrono>
#include <iostream>
#include <numeric>
#include <optional>
#include <vector>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "../common/byte_stacker.hpp"
#include "../common/common.hpp"
#include "../common/lz77_decoder.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "metaprogramming_utils.hpp" // included from ../../../../include
#include "snappy_reader.hpp"
// declare the kernel and pipe names globally to reduce name mangling
class SnappyReaderKernelID;
class LZ77DecoderKernelID;
class ByteStackerKernelID;
class SnappyReaderToLZ77PipeID;
class LZ77ToByteStackerPipeID;
//
// Submits the kernels for the Snappy decompression engine and returns a list of
// SYCL events from each kernel launch.
//
// Template parameters:
//    InPipe: the input pipe that streams in compressed data,
//      'literals_per_cycle' bytes at a time
//    OutPipe: the output pipe that streams out decompressed data,
//      'literals_per_cycle' bytes at a time
// literals_per_cycle: the number of literals streamed out the output stream.
// This sets how many literals can be read from the input stream at once,
// as well as the number that can be read at once from the history buffer
// in the LZ77 decoder.
//
// Arguments:
// q: the SYCL queue
// in_count: the number of compressed bytes
// preamble_count: an output buffer for the uncompressed size read in the
// Snappy preamble
//
template <typename InPipe, typename OutPipe, unsigned literals_per_cycle>
std::vector<sycl::event> SubmitSnappyDecompressKernels(
sycl::queue& q, unsigned in_count, unsigned* preamble_count) {
// check that the input and output pipe types are actually pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// 'literals_per_cycle' must be greater than 0 and a power of 2
static_assert(literals_per_cycle > 0);
static_assert(fpga_tools::IsPow2(literals_per_cycle));
// the inter-kernel pipes for the snappy decompression engine
constexpr int SnappyReaderToLZ77PipeDepth = 16;
using SnappyReaderToLZ77Pipe = sycl::ext::intel::pipe<
SnappyReaderToLZ77PipeID,
FlagBundle<SnappyLZ77InputData<literals_per_cycle>>,
SnappyReaderToLZ77PipeDepth>;
auto snappy_reader_event =
SubmitSnappyReader<SnappyReaderKernelID, InPipe, SnappyReaderToLZ77Pipe,
literals_per_cycle>(q, in_count, preamble_count);
// the design only needs a ByteStacker kernel when literals_per_cycle > 1
if constexpr (literals_per_cycle > 1) {
using LZ77ToByteStackerPipe =
sycl::ext::intel::pipe<LZ77ToByteStackerPipeID,
FlagBundle<BytePack<literals_per_cycle>>>;
auto lz77_event =
SubmitLZ77Decoder<LZ77DecoderKernelID, SnappyReaderToLZ77Pipe,
LZ77ToByteStackerPipe, literals_per_cycle,
kSnappyMaxLZ77Distance, kSnappyMaxLZ77Length>(q);
auto byte_stacker_event =
SubmitByteStacker<ByteStackerKernelID, LZ77ToByteStackerPipe, OutPipe,
literals_per_cycle>(q);
return {snappy_reader_event, lz77_event, byte_stacker_event};
} else {
auto lz77_event =
SubmitLZ77Decoder<LZ77DecoderKernelID, SnappyReaderToLZ77Pipe, OutPipe,
literals_per_cycle, kSnappyMaxLZ77Distance,
kSnappyMaxLZ77Length>(q);
return {snappy_reader_event, lz77_event};
}
}
// declare kernel and pipe names at the global scope to reduce name mangling
class ProducerId;
class ConsumerId;
class InPipeId;
class OutPipeId;
// the input and output pipe
using InPipe = sycl::ext::intel::pipe<InPipeId, ByteSet<kLiteralsPerCycle>>;
using OutPipe =
sycl::ext::intel::pipe<OutPipeId, FlagBundle<BytePack<kLiteralsPerCycle>>>;
//
// The SNAPPY decompressor. See ../common/common.hpp for more information.
//
template <unsigned literals_per_cycle>
class SnappyDecompressor : public DecompressorBase {
public:
std::optional<std::vector<unsigned char>> DecompressBytes(
sycl::queue& q, std::vector<unsigned char>& in_bytes, int runs,
bool print_stats) {
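// the global InPipe/OutPipe above are declared with kLiteralsPerCycle, so
// this template parameter must match it for the pipe data types to line up
static_assert(literals_per_cycle == kLiteralsPerCycle);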
bool passed = true;
unsigned in_count = in_bytes.size();
int in_count_padded =
fpga_tools::RoundUpToMultiple(in_count, kLiteralsPerCycle);
// read the expected output size from the start of the file
// this is used to size the output buffer
unsigned out_count = 0;
unsigned byte_idx = 0;
unsigned shift = 0;
bool keep_reading_preamble = true;
while (keep_reading_preamble) {
if (byte_idx > 4) {
std::cerr << "ERROR: uncompressed length should not span more than 5"
<< " bytes\n";
std::terminate();
}
auto b = in_bytes[byte_idx];
keep_reading_preamble = (b >> 7) & 0x1;
out_count |= (b & 0x7F) << shift;
shift += 7;
byte_idx += 1;
}
std::vector<unsigned char> out_bytes(out_count);
// round up the output count to the nearest multiple of kLiteralsPerCycle,
// which allows us to avoid predicating the last writes to the output buffer
int out_count_padded =
fpga_tools::RoundUpToMultiple(out_count, kLiteralsPerCycle);
// host variables for output from device
unsigned preamble_count_host;
// track timing information in ms
std::vector<double> time(runs);
// input and output data pointers allocated with USM (device allocations
// when targeting a BSP, shared allocations otherwise)
unsigned char *in, *out;
unsigned* preamble_count;
try {
#if defined (IS_BSP)
// allocate memory on the device for the input and output
if ((in = sycl::malloc_device<unsigned char>(in_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'in'\n";
std::terminate();
}
if ((out = sycl::malloc_device<unsigned char>(out_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'out'\n";
std::terminate();
}
if ((preamble_count = sycl::malloc_device<unsigned>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'preamble_count'\n";
std::terminate();
}
#else
// allocate shared memory
if ((in = sycl::malloc_shared<unsigned char>(in_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'in'\n";
std::terminate();
}
if ((out = sycl::malloc_shared<unsigned char>(out_count_padded, q)) ==
nullptr) {
std::cerr << "ERROR: could not allocate space for 'out'\n";
std::terminate();
}
if ((preamble_count = sycl::malloc_shared<unsigned>(1, q)) == nullptr) {
std::cerr << "ERROR: could not allocate space for 'preamble_count'\n";
std::terminate();
}
#endif
// copy the input data to the device memory and wait for the copy to
// finish
q.memcpy(in, in_bytes.data(), in_count * sizeof(unsigned char)).wait();
// run the design multiple times to increase the accuracy of the timing
for (int i = 0; i < runs; i++) {
std::cout << "Launching kernels for run " << i << std::endl;
// run the producer and consumer kernels
auto producer_event =
SubmitProducer<ProducerId, InPipe, literals_per_cycle>(
q, in_count_padded, in);
auto consumer_event =
SubmitConsumer<ConsumerId, OutPipe, literals_per_cycle>(
q, out_count_padded, out);
// run the decompression kernels
auto snappy_decompress_events =
SubmitSnappyDecompressKernels<InPipe, OutPipe, kLiteralsPerCycle>(
q, in_count, preamble_count);
// wait for the producer and consumer to finish
auto s = std::chrono::high_resolution_clock::now();
producer_event.wait();
consumer_event.wait();
auto e = std::chrono::high_resolution_clock::now();
// wait for the decompression kernels to finish
for (auto& e : snappy_decompress_events) {
e.wait();
}
std::cout << "All kernels finished for run " << i << std::endl;
// calculate the time the kernels ran for, in milliseconds
time[i] = std::chrono::duration<double, std::milli>(e - s).count();
// Copy the output back from the device
q.memcpy(out_bytes.data(), out, out_count * sizeof(unsigned char))
.wait();
q.memcpy(&preamble_count_host, preamble_count, sizeof(unsigned)).wait();
// validating the output
// check the number of bytes we read
if (preamble_count_host != out_count) {
std::cerr << "ERROR: Out counts do not match: " << preamble_count_host
<< " != " << out_count
<< " (preamble_count_host != out_count)\n";
passed = false;
}
}
} catch (sycl::exception const& e) {
std::cout << "Caught a synchronous SYCL exception: " << e.what() << "\n";
std::terminate();
}
// free the allocated device memory
sycl::free(in, q);
sycl::free(out, q);
sycl::free(preamble_count, q);
// print the performance results
if (passed && print_stats) {
// NOTE: when run in emulation, these results do not accurately represent
// the performance of the kernels on real FPGA hardware
double avg_time_ms;
if (runs > 1) {
avg_time_ms =
std::accumulate(time.begin() + 1, time.end(), 0.0) / (runs - 1);
} else {
avg_time_ms = time[0];
}
double compression_ratio =
(double)(preamble_count_host) / (double)(in_count);
// the number of output megabytes
double out_mb = preamble_count_host * sizeof(unsigned char) * 1e-6;
std::cout << "Execution time: " << avg_time_ms << " ms\n";
std::cout << "Output Throughput: " << (out_mb / (avg_time_ms * 1e-3))
<< " MB/s\n";
std::cout << "Compression Ratio: " << compression_ratio << ":1"
<< "\n";
}
if (passed) {
return out_bytes;
} else {
return {};
}
}
};
#endif /* __SNAPPY_DECOMPRESSOR_HPP__ */
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/decompress/src/snappy/snappy_reader.hpp | #ifndef __SNAPPY_READER_HPP__
#define __SNAPPY_READER_HPP__
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/ac_types/ac_int.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include "byte_stream.hpp"
#include "constexpr_math.hpp" // included from ../../../../include
#include "metaprogramming_utils.hpp" // included from ../../../../include
//
// Streams bytes in from InPipe, 'literals_per_cycle' at a time, and
// writes LZ77InputData (see ../common/common.hpp) to the OutPipe for the
// LZ77Decoder kernel.
//
// Template parameters:
// InPipe: a SYCL pipe that streams in compressed Snappy data,
// 'literals_per_cycle' bytes at a time.
// OutPipe: a SYCL pipe that streams out either an array of literals with
// a valid count (when reading a literal string) or a {length, distance}
// pair (when doing a copy), in the form of LZ77InputData data.
//      This is the input to the LZ77 decoder.
// literals_per_cycle: the maximum number of literals read from the input
// (and written to the output) at once.
//
// Arguments:
// in_count: the number of compressed bytes
//
template <typename InPipe, typename OutPipe, unsigned literals_per_cycle>
unsigned SnappyReader(unsigned in_count) {
// ensure the InPipe and OutPipe are SYCL pipes
static_assert(fpga_tools::is_sycl_pipe_v<InPipe>);
static_assert(fpga_tools::is_sycl_pipe_v<OutPipe>);
// the input and output pipe data types
using InPipeBundleT = decltype(InPipe::read());
using OutPipeBundleT = decltype(OutPipe::read());
// make sure the input and output types are correct
static_assert(std::is_same_v<InPipeBundleT, ByteSet<literals_per_cycle>>);
static_assert(
std::is_same_v<OutPipeBundleT,
FlagBundle<SnappyLZ77InputData<literals_per_cycle>>>);
// the number of bits to count to 'literals_per_cycle'
constexpr unsigned literals_per_cycle_bits =
fpga_tools::Log2(literals_per_cycle) + 1;
// the maximum number of bytes to read at once is max(literals_per_cycle, 5),
// cases:
// - reading the preamble length is 1...5 bytes
// - reading a literal length can be 1...5 bytes
// - reading a copy command can be 1...5 bytes
// - reading literals can be 1...literals_per_cycle bytes
constexpr unsigned kMaxReadBytes = fpga_tools::Max(literals_per_cycle, 5U);
// the stream size should be double the maximum bytes we will need on each
// iteration so that we always have bytes ready
// the maximum number of bytes we need is the maximum of 5 and
// literals_per_cycle
constexpr unsigned kByteStreamSize = kMaxReadBytes * 2;
// the byte stream
using ByteStreamT = ByteStream<kByteStreamSize, kMaxReadBytes>;
ByteStreamT byte_stream;
unsigned data_read_in_preamble = 0;
// the first 1...5 bytes indicate the number of bytes in the stream
bool reading_preamble = true;
unsigned preamble_count_local = 0;
ac_uint<3> bytes_processed_in_preamble;
// NOTE: this loop is expected to have a trip count of ~1-5 iterations and
// therefore is not a performance critical loop. However, the compiler doesn't
// know that and tries to optimize for throughput (~Fmax/II). We don't want
// this loop to be our Fmax bottleneck, so increase the II.
[[intel::initiation_interval(3)]] // NO-FORMAT: Attribute
while (reading_preamble) {
if (byte_stream.Space() >= literals_per_cycle) {
bool valid_read;
auto pipe_data = InPipe::read(valid_read);
if (valid_read) {
byte_stream.template Write(pipe_data);
data_read_in_preamble += literals_per_cycle;
}
}
if (byte_stream.Count() >= 5) {
// grab the 5 bytes
auto first_five_bytes = byte_stream.template Read<5>();
// the uncompressed length is in the range [0, 2^32) and is encoded with
// a varint between 1 to 5 bytes. the top bit of each byte indicates
// whether to keep reading, and the bottom seven bits are data.
// For example, a length of 64 is encoded with 0x40, and a length
// of 2097150 (0x1FFFFE) would be stored as 0xFE 0xFF 0x7F.
// Below, we are grabbing the "keep going" bit (the MSB) and the data
// bits (the 7 LSBs).
ac_uint<1> first_five_bytes_use_bits[5];
ac_uint<7> first_five_bytes_data[5];
#pragma unroll
for (int i = 0; i < 5; i++) {
auto b = first_five_bytes.byte[i];
first_five_bytes_use_bits[i] = (b >> 7) & 0x1;
first_five_bytes_data[i] = b & 0x7F;
}
// Now, we build the 5 possible uncompressed lengths assuming we use
// 1 to 5 of the bytes
unsigned preamble_counts[5];
#pragma unroll
for (int i = 0; i < 5; i++) {
preamble_counts[i] = 0;
#pragma unroll
for (int j = 0; j < i + 1; j++) {
preamble_counts[i] |= first_five_bytes_data[j].to_uint() << (j * 7);
}
}
// now select the actual uncompressed length by checking the
// "keep going" bit of each byte
if (first_five_bytes_use_bits[0] == 0) {
bytes_processed_in_preamble = 1;
preamble_count_local = preamble_counts[0];
} else if (first_five_bytes_use_bits[1] == 0) {
bytes_processed_in_preamble = 2;
preamble_count_local = preamble_counts[1];
} else if (first_five_bytes_use_bits[2] == 0) {
bytes_processed_in_preamble = 3;
preamble_count_local = preamble_counts[2];
} else if (first_five_bytes_use_bits[3] == 0) {
bytes_processed_in_preamble = 4;
preamble_count_local = preamble_counts[3];
} else {
bytes_processed_in_preamble = 5;
preamble_count_local = preamble_counts[4];
}
// shift the byte stream by however many we used and flag that we
// are done reading the preamble
byte_stream.Shift(bytes_processed_in_preamble);
reading_preamble = false;
}
}
// are we reading a literal and how many more literals do we have to read
bool reading_literal = false;
unsigned literal_len_counter;
unsigned data_read = data_read_in_preamble;
bool all_data_read = data_read >= in_count;
bool all_data_read_next = data_read >= (in_count - literals_per_cycle);
// keep track of the number of bytes processed
constexpr unsigned max_bytes_processed_inc =
fpga_tools::Max((unsigned)5, literals_per_cycle);
unsigned bytes_processed_next[max_bytes_processed_inc + 1];
bool bytes_processed_in_range = true;
bool bytes_processed_in_range_next[max_bytes_processed_inc + 1];
#pragma unroll
for (int i = 0; i < max_bytes_processed_inc + 1; i++) {
bytes_processed_next[i] = bytes_processed_in_preamble + i;
bytes_processed_in_range_next[i] =
bytes_processed_in_preamble + i < in_count;
}
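// NOTE: precomputing 'bytes_processed_next' and
// 'bytes_processed_in_range_next' for every possible per-iteration byte
// count lets the main loop below pick the updated values with a simple
// select, keeping the add-and-compare off the critical path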
// the output data
bool out_ready = false;
SnappyLZ77InputData<literals_per_cycle> out_data;
// main processing loop
// keep going while there is input to read, or data to read from byte_stream
while (bytes_processed_in_range) {
// grab new bytes if there is space
if (byte_stream.Space() >= literals_per_cycle) {
bool valid_read;
auto pipe_data = InPipe::read(valid_read);
if (valid_read) {
byte_stream.template Write(pipe_data);
data_read += literals_per_cycle;
all_data_read = all_data_read_next;
all_data_read_next = data_read >= (in_count - literals_per_cycle);
}
}
if (!reading_literal) {
// finding the next command, which is either a literal string
// or copy command. We will need at most 5 bytes to get the command
// in a single iteration, so make sure we have enough bytes to do so
if (byte_stream.Count() >= 5 || all_data_read) {
// grab the next 5 bytes
auto five_bytes = byte_stream.template Read<5>();
// what type of command is this, literal or copy?
ac_uint<8> first_byte(five_bytes.byte[0]);
ac_uint<2> first_byte_type = first_byte.slc<2>(0);
ac_uint<6> first_byte_data = first_byte.slc<6>(2);
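// tag byte examples (illustrative): 0x24 -> type 0 (literal) with data 9,
// i.e. a 10-byte literal string; 0x25 -> type 1 copy with length
// (9 & 0x7) + 4 = 5 and offset high bits 0b001 prepended to the next byte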
//////////////////////////////////////////////////////////////////////
// Assuming the command is a literal length
// find all possible literal lengths, which could require 0 to 4 more
// bytes
ac_uint<6> literal_len_without_extra_bytes = first_byte_data;
// if the len >= 60, then the number of extra bytes to read for the
// length = len - 60 + 1 (1 to 4 bytes). So grab the low 2 bits of the
// data from the first byte that, if the literal len >= 60, will be
// the number of extra bytes to read minus 1 (to save on the number
// of bits to store it, we will add 1 later).
ac_uint<2> literal_len_extra_bytes = first_byte_data.slc<2>(0);
// find all the possible literal lengths assuming we need to read 1 to
// 4 more bytes
unsigned literal_len_with_bytes[4];
#pragma unroll
for (int i = 0; i < 4; i++) {
literal_len_with_bytes[i] = 0;
#pragma unroll
for (int j = 0; j < i + 1; j++) {
literal_len_with_bytes[i] |= (unsigned)(five_bytes.byte[j + 1])
<< (j * 8);
}
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Assuming the command is a copy
// find all possible copy lengths and offset combinations
// a type 1 copy stores len-4 in the bits 4...2 of the first byte
ac_uint<4> copy_len_type1 = first_byte_data.slc<3>(0) + ac_uint<3>(4);
// a type 2 and 3 copy store len-1 in the top 6 bits of the first byte
ac_uint<7> copy_len_type2_and_type3 = first_byte_data + ac_uint<1>(1);
// a type 1 copy uses an 11-bit offset, with the high 3 bits being
// bits 7...5 of the first byte, along with the next byte
unsigned copy_offset_extra_bits_type1 =
first_byte_data.slc<3>(3).to_uint();
unsigned copy_offset_type1 = (copy_offset_extra_bits_type1 << 8) |
(unsigned)(five_bytes.byte[1]);
// type 2 and 3 copies use 16-bit and 32-bit offsets, respectively.
// they are little-endian integers using the next 2 or 4 bytes,
// respectively
unsigned copy_offset_type2 = (unsigned)(five_bytes.byte[2]) << 8 |
(unsigned)(five_bytes.byte[1]);
unsigned copy_offset_type3 = 0;
#pragma unroll
for (int i = 0; i < 4; i++) {
  // little-endian: the first extra byte holds the least significant bits
  copy_offset_type3 |= (unsigned)(five_bytes.byte[i + 1]) << (8 * i);
}
//////////////////////////////////////////////////////////////////////
// we have now built up every combination of commands for literals
// and copies. Now it is time to figure out what we are actually doing
ac_uint<3> bytes_used;
if (first_byte_type == 0) {
// LITERAL
// calculate the literal length
if (literal_len_without_extra_bytes < 60) {
literal_len_counter =
literal_len_without_extra_bytes + ac_uint<1>(1);
bytes_used = 1;
} else {
literal_len_counter =
literal_len_with_bytes[literal_len_extra_bytes] + 1;
// extra bytes = literal_len_extra_bytes + 1, + 1 for first byte
bytes_used = literal_len_extra_bytes + ac_uint<2>(2);
}
// NOTE: could grab the extra bytes and start writing literals
// right away, but that may cost Fmax/II
out_ready = false;
reading_literal = true;
} else if (first_byte_type == 1) {
// COPY: with 1 extra byte for offset
out_data.is_literal = false;
out_data.length = copy_len_type1;
out_data.distance = copy_offset_type1;
bytes_used = 2;
out_ready = true;
} else if (first_byte_type == 2) {
// COPY: with 2 extra bytes for offset
out_data.is_literal = false;
out_data.length = copy_len_type2_and_type3;
out_data.distance = copy_offset_type2;
bytes_used = 3;
out_ready = true;
} else { // first_byte_type == 3
// COPY: with 4 extra bytes for offset
out_data.is_literal = false;
out_data.length = copy_len_type2_and_type3;
out_data.distance = copy_offset_type3;
bytes_used = 5;
out_ready = true;
}
// shift by however many bytes we used
byte_stream.Shift(bytes_used);
auto bytes_processed_next_val = bytes_processed_next[bytes_used];
bytes_processed_in_range = bytes_processed_in_range_next[bytes_used];
#pragma unroll
for (int i = 0; i < max_bytes_processed_inc + 1; i++) {
bytes_processed_next[i] = bytes_processed_next_val + i;
bytes_processed_in_range_next[i] =
bytes_processed_next_val < in_count - i;
}
}
} else {
// reading a string of literals so figure out how many literals to read
// in this iteration (in range [1, literals_per_cycle]) and whether we
// should keep reading literals next iteration (if we do end up reading
// the literals this iteration)
ac_uint<literals_per_cycle_bits> amount_to_read;
bool still_reading_literal;
if (literal_len_counter < literals_per_cycle) {
amount_to_read = literal_len_counter;
still_reading_literal = false;
} else {
amount_to_read = literals_per_cycle;
still_reading_literal = (literal_len_counter != literals_per_cycle);
}
// reading literals from input stream
if (byte_stream.Count() >= amount_to_read) {
// figure out how many literals will be valid
// we can always subtract by 'literals_per_cycle' since this will only
// go negative on the last iteration, which we detect with
// 'still_reading_literal'
literal_len_counter -= literals_per_cycle;
// whether to keep reading the literals
reading_literal = still_reading_literal;
// read the literals (we know we have enough)
auto literals = byte_stream.template Read<literals_per_cycle>();
// build the output data
out_data.is_literal = true;
out_data.valid_count = amount_to_read;
#pragma unroll
for (int i = 0; i < literals_per_cycle; i++) {
out_data.literal[i] = literals.byte[i];
}
out_ready = true;
// shift the byte stream by however many (valid) literals we wrote
byte_stream.Shift(amount_to_read);
auto bytes_processed_next_val = bytes_processed_next[amount_to_read];
bytes_processed_in_range =
bytes_processed_in_range_next[amount_to_read];
#pragma unroll
for (int i = 0; i < max_bytes_processed_inc + 1; i++) {
bytes_processed_next[i] = bytes_processed_next_val + i;
bytes_processed_in_range_next[i] =
bytes_processed_next_val < in_count - i;
}
}
}
// write the output
if (out_ready) {
OutPipe::write(OutPipeBundleT(out_data));
out_ready = false;
}
}
// notify downstream that we are done
OutPipe::write(OutPipeBundleT(true));
// return the preamble count
return preamble_count_local;
}
template <typename Id, typename InPipe, typename OutPipe,
unsigned literals_per_cycle>
sycl::event SubmitSnappyReader(sycl::queue& q, unsigned in_count,
unsigned* preamble_count_ptr) {
return q.single_task<Id>([=] {
#if defined (IS_BSP)
// When targeting a BSP, we instruct the compiler that this pointer
// lives on the device.
// Knowing this, the compiler won't generate hardware to
// potentially get data from the host.
sycl::device_ptr<unsigned> preamble_count(preamble_count_ptr);
#else
// Device pointers are not supported when targeting an FPGA
// family/part
unsigned* preamble_count(preamble_count_ptr);
#endif
*preamble_count =
SnappyReader<InPipe, OutPipe, literals_per_cycle>(in_count);
});
}
#endif /* __SNAPPY_READER_HPP__ */ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/crr/src/main.cpp | // ==============================================================
// Copyright Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// This agreement shall be governed in all respects by the laws of the State of
// California and by the laws of the United States of America.
////////////////////////////////////////////////////////////////////////////////
//
// CRRSolver CPU/FPGA Accelerator Demo Program
//
////////////////////////////////////////////////////////////////////////////////
//
// This design implements a simple Cox-Ross-Rubinstein (CRR) binomial tree
// model with Greeks for American exercise options.
//
//
// Optimization summary:
// -- Area-consuming but infrequent calculations are done on the CPU.
// -- Parallelize the calculation of a single CRR.
// -- Run multiple independent CRRs in parallel.
// -- Optimized memory configurations to reduce the need for replication
// and to eliminate the need for double-pumping M20Ks.
//
// The following diagram shows the mechanism of optimizations to CRR.
//
//
// +------+ ^
// +------------>|optval| |
// | | [2] | |
// | +------+ |
// | |
// | |
// +--+---+ |
// +------------>|optval| |
// | | [1] | |
// | +--+---+ |
// | | |
// | | |
// | | | Loop4(L4)
// | | | updates
// +---+--+ +------------>+------+ | multiple
// |optval| |optval| | elements
// | [0] | | [1] | | in optval[]
// +---+--+ +------------>+------+ | simultaneously
// | | |
// | | |
// | | |
// | | |
// | +--+---+ |
// | |optval| |
// +------------>| [0] | |
// +--+---+ |
// | |
// | |
// | +------+ |
// | |optval| |
// +------------>| [0] | |
// +------+ +
//
//
//
//
// step 1 step 2
//
//
// <------------------------------------------+
// Loop3(L3) updates each level of the tree
//
//
#include <sycl/sycl.hpp>
#include <sycl/ext/intel/fpga_extensions.hpp>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <vector>
#include "CRR_common.hpp"
#include "exception_handler.hpp"
using namespace std;
using namespace sycl;
class CRRSolver;
double CrrSolver(const int n_items, vector<CRRMeta> &in_params,
vector<CRRResParams> &res_params,
vector<CRRPerStepMeta> &in_params2, queue &q) {
auto start = std::chrono::steady_clock::now();
constexpr int steps = kMaxNSteps2;
const int n_crr =
(((n_items + (OUTER_UNROLL - 1)) / OUTER_UNROLL) * OUTER_UNROLL) * 3;
{
buffer<CRRMeta, 1> i_params(in_params.size());
buffer<CRRPerStepMeta, 1> a_params(in_params2.size());
buffer<CRRResParams, 1> r_params(res_params.size());
r_params.set_final_data(res_params.data());
event e;
{
// copy the input buffers
q.submit([&](handler& h) {
auto accessor_v =
i_params.template get_access<access::mode::discard_write>(h);
h.copy(in_params.data(), accessor_v);
});
q.submit([&](handler& h) {
auto accessor_v2 =
a_params.template get_access<access::mode::discard_write>(h);
h.copy(in_params2.data(), accessor_v2);
});
// start the main kernel
e = q.submit([&](handler &h) {
auto accessor_v =
i_params.template get_access<access::mode::read_write>(h);
auto accessor_v2 =
a_params.template get_access<access::mode::read_write>(h);
auto accessor_r =
r_params.template get_access<access::mode::discard_write>(h);
h.single_task<CRRSolver>([=]() [[intel::kernel_args_restrict]] {
// Kernel requires n_crr to be a multiple of OUTER_UNROLL.
// This is taken care of by the host.
const int n_crr_div = n_crr / OUTER_UNROLL;
// Outer-loop counter. Use a do-while loop for better timing-closure
// characteristics because it tells the compiler the loop body will
// never be skipped.
int oc = 0;
do {
// Metadata of CRR problems
[[intel::fpga_register]] double u[OUTER_UNROLL];
[[intel::fpga_register]] double c1[OUTER_UNROLL];
[[intel::fpga_register]] double c2[OUTER_UNROLL];
[[intel::fpga_register]] double param_1[OUTER_UNROLL];
[[intel::fpga_register]] double param_2[OUTER_UNROLL];
[[intel::fpga_register]] short n_steps[OUTER_UNROLL];
// Current values in binomial tree. We only need to keep track of
// one level worth of data, not the entire tree.
[[intel::fpga_memory, intel::singlepump,
intel::bankwidth(sizeof(double)),
intel::numbanks(INNER_UNROLL * OUTER_UNROLL_POW2),
intel::private_copies(
8)]] double optval[kMaxNSteps3][OUTER_UNROLL_POW2];
// Initial values in binomial tree, which correspond to the last
// level of the binomial tree.
[[intel::fpga_memory, intel::singlepump,
intel::bankwidth(sizeof(double)),
intel::numbanks(INNER_UNROLL * OUTER_UNROLL_POW2),
intel::private_copies(
8)]] double init_optval[kMaxNSteps3][OUTER_UNROLL_POW2];
// u2_array pre-calculates the power function of u2.
[[intel::fpga_memory, intel::singlepump,
intel::bankwidth(sizeof(double)),
intel::numbanks(INNER_UNROLL * OUTER_UNROLL_POW2),
intel::private_copies(
8)]] double u2_array[kMaxNSteps3][OUTER_UNROLL_POW2];
// p1powu_array pre-calculates param_1 multiplied by powers of u.
[[intel::fpga_memory, intel::singlepump,
intel::bankwidth(sizeof(double)),
intel::numbanks(INNER_UNROLL * OUTER_UNROLL_POW2),
intel::private_copies(
8)]] double p1powu_array[kMaxNSteps3][OUTER_UNROLL_POW2];
// n0_optval stores the binomial tree value corresponding to node 0
// of a level. This is the same as what's stored in
// optval/init_optval, but replicating this data allows us to have
// only one read port for optval and init_optval, thereby removing
// the need of double-pumping or replication. n0_optval_2 is a copy
// of n0_optval that stores the node 0 value for a specific layer of
// the tree. pgreek is the array saving values for post-calculating
// Greeks.
[[intel::fpga_register]] double n0_optval[OUTER_UNROLL];
[[intel::fpga_register]] double n0_optval_2[OUTER_UNROLL];
[[intel::fpga_register]] double pgreek[4][OUTER_UNROLL];
// L1 + L2:
// Populate init_optval -- calculate the last level of the binomial
// tree.
for (short ic = 0; ic < OUTER_UNROLL; ++ic) {
// Transfer data from DRAM to local memory or registers
const int c = oc * OUTER_UNROLL + ic;
const CRRMeta param = accessor_v[c];
u[ic] = param.u;
c1[ic] = param.c1;
c2[ic] = param.c2;
param_1[ic] = param.param_1;
param_2[ic] = param.param_2;
n_steps[ic] = param.n_steps;
for (short t = steps; t >= 0; --t) {
const ArrayEle param_array = accessor_v2[c].array_eles[t];
const double init_val = param_array.init_optval;
init_optval[t][ic] = init_val;
// n0_optval intends to store the node value at t == 0.
// Instead of qualifying this statement by an "if (t == 0)",
// which couples the loop counter to the timing path of the
// assignment, we reverse the loop direction so the last value
// stored corresponds to t == 0.
n0_optval[ic] = init_val;
// Transfer data from DRAM to local memory or registers
u2_array[t][ic] = param_array.u2;
p1powu_array[t][ic] = param_array.p1powu;
}
}
// L3:
// Update optval[] -- calculate each level of the binomial tree.
// reg[] helps to achieve updating INNER_UNROLL elements in optval[]
// simultaneously.
[[intel::disable_loop_pipelining]] // NO-FORMAT: Attribute
for (short t = 0; t <= steps - 1; ++t) {
[[intel::fpga_register]] double reg[INNER_UNROLL + 1][OUTER_UNROLL];
double val_1, val_2;
#pragma unroll
for (short ic = 0; ic < OUTER_UNROLL; ++ic) {
reg[0][ic] = n0_optval[ic];
}
// L4:
// Calculate all the elements in optval[] -- all the tree nodes
// for one level of the tree
[[intel::ivdep]] // NO-FORMAT: Attribute
for (int n = 0; n <= steps - 1 - t; n += INNER_UNROLL) {
#pragma unroll
for (short ic = 0; ic < OUTER_UNROLL; ++ic) {
#pragma unroll
for (short ri = 1; ri <= INNER_UNROLL; ++ri) {
reg[ri][ic] =
(t == 0) ? init_optval[n + ri][ic] : optval[n + ri][ic];
}
#pragma unroll
for (short ri = 0; ri < INNER_UNROLL; ++ri) {
const double val = sycl::fmax(
c1[ic] * reg[ri][ic] + c2[ic] * reg[ri + 1][ic],
p1powu_array[t][ic] * u2_array[n + ri][ic] -
param_2[ic]);
optval[n + ri][ic] = val;
if (n + ri == 0) {
n0_optval[ic] = val;
}
if (n + ri == 1) {
val_1 = val;
}
if (n + ri == 2) {
val_2 = val;
}
}
reg[0][ic] = reg[INNER_UNROLL][ic];
if (t == steps - 5) {
pgreek[3][ic] = val_2;
}
if (t == steps - 3) {
pgreek[0][ic] = n0_optval[ic];
pgreek[1][ic] = val_1;
pgreek[2][ic] = val_2;
n0_optval_2[ic] = n0_optval[ic];
}
}
}
}
// L5: transfer crr_res_params to DRAM
#pragma unroll
for (short ic = 0; ic < OUTER_UNROLL; ++ic) {
const int c = oc * OUTER_UNROLL + ic;
if (n_steps[ic] < steps) {
accessor_r[c].optval0 = n0_optval_2[ic];
} else {
accessor_r[c].optval0 = n0_optval[ic];
}
accessor_r[c].pgreek[0] = pgreek[0][ic];
accessor_r[c].pgreek[1] = pgreek[1][ic];
accessor_r[c].pgreek[2] = pgreek[2][ic];
accessor_r[c].pgreek[3] = pgreek[3][ic];
}
// Increment counters
oc += 1;
} while (oc < n_crr_div);
});
});
}
}
auto end = std::chrono::steady_clock::now();
double diff =
    std::chrono::duration_cast<std::chrono::duration<double>>(end - start)
        .count();
return diff;
}
void ReadInputFromFile(ifstream &input_file, vector<InputData> &inp) {
string line_of_args;
while (getline(input_file, line_of_args)) {
InputData temp;
istringstream line_of_args_ss(line_of_args);
line_of_args_ss >> temp.n_steps;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.cp;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.spot;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.fwd;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.strike;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.vol;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.df;
line_of_args_ss.ignore(1, ',');
line_of_args_ss >> temp.t;
inp.push_back(temp);
}
}
static string ToStringWithPrecision(const double value, const int p = 6) {
ostringstream out;
out.precision(p);
out << std::fixed << value;
return out.str();
}
void WriteOutputToFile(ofstream &output_file, const vector<OutputRes> &outp) {
size_t n = outp.size();
for (size_t i = 0; i < n; ++i) {
OutputRes temp = outp[i];
string line = ToStringWithPrecision(temp.value, 12) + " " +
ToStringWithPrecision(temp.delta, 12) + " " +
ToStringWithPrecision(temp.gamma, 12) + " " +
ToStringWithPrecision(temp.vega, 12) + " " +
ToStringWithPrecision(temp.theta, 12) + " " +
ToStringWithPrecision(temp.rho, 12) + "\n";
output_file << line;
}
}
bool FindGetArgString(const string &arg, const char *str, char *str_value,
size_t maxchars) {
size_t found = arg.find(str, 0, strlen(str));
if (found != string::npos) {
const char *sptr = &arg.c_str()[found + strlen(str)];
for (int i = 0; i < maxchars - 1; i++) {
char ch = sptr[i];
switch (ch) {
case ' ':
case '\t':
case '\0':
str_value[i] = 0;
return true;
break;
default:
str_value[i] = ch;
break;
}
}
return true;
}
return false;
}
// Perform data pre-processing work
// Three different option prices are required to solve each CRR problem
// The following lists why each option price is required:
// [0] : Used to compute Premium, Delta, Gamma and Theta
// [1] : Used to compute Rho
// [2] : Used to compute Vega
CRRInParams PrepareData(const InputData &inp) {
CRRInParams in_params;
in_params.n_steps = inp.n_steps;
double r[2];
r[0] = sycl::pow(inp.df, 1.0 / inp.n_steps);
double d_df = sycl::exp(-inp.t * kEpsilon);
r[1] = sycl::pow(inp.df * d_df, 1.0 / inp.n_steps);
in_params.u[0] = sycl::exp(inp.vol * sycl::sqrt(inp.t / inp.n_steps));
in_params.u[1] = in_params.u[0];
in_params.u[2] = sycl::exp((inp.vol + kEpsilon) * sycl::sqrt(inp.t / inp.n_steps));
in_params.u2[0] = in_params.u[0] * in_params.u[0];
in_params.u2[1] = in_params.u[1] * in_params.u[1];
in_params.u2[2] = in_params.u[2] * in_params.u[2];
in_params.umin[0] = inp.spot * sycl::pow(1 / in_params.u[0], inp.n_steps + kOpt0);
in_params.umin[1] = inp.spot * sycl::pow(1 / in_params.u[1], inp.n_steps);
in_params.umin[2] = inp.spot * sycl::pow(1 / in_params.u[2], inp.n_steps);
in_params.c1[0] =
r[0] * (in_params.u[0] - sycl::pow(inp.fwd / inp.spot, 1.0 / inp.n_steps)) /
(in_params.u[0] - 1 / in_params.u[0]);
in_params.c1[1] =
r[1] *(in_params.u[1] - sycl::pow((inp.fwd / d_df) / inp.spot, 1.0 / inp.n_steps)) /
(in_params.u[1] - 1 / in_params.u[1]);
in_params.c1[2] =
r[0] * (in_params.u[2] - sycl::pow(inp.fwd / inp.spot, 1.0 / inp.n_steps)) /
(in_params.u[2] - 1 / in_params.u[2]);
in_params.c2[0] = r[0] - in_params.c1[0];
in_params.c2[1] = r[1] - in_params.c1[1];
in_params.c2[2] = r[0] - in_params.c1[2];
in_params.param_1[0] = inp.cp * in_params.umin[0];
in_params.param_1[1] = inp.cp * in_params.umin[1];
in_params.param_1[2] = inp.cp * in_params.umin[2];
in_params.param_2 = inp.cp * inp.strike;
return in_params;
}
CRRArrayEles PrepareArrData(const CRRInParams &in) {
CRRArrayEles arr;
// Write in reverse t-direction to match kernel access pattern
for (int i = 0; i <= in.n_steps + kOpt0; ++i) {
for (int inner_func_index = 0; inner_func_index < 3; ++inner_func_index) {
arr.array_eles[i][inner_func_index].u2 = sycl::pow(in.u2[inner_func_index], (double) i);
arr.array_eles[i][inner_func_index].p1powu =
in.param_1[inner_func_index] * sycl::pow(in.u[inner_func_index], (double) (i + 1));
arr.array_eles[i][inner_func_index].init_optval =
sycl::fmax(in.param_1[inner_func_index] * sycl::pow(in.u2[inner_func_index], (double) i) -
in.param_2, 0.0);
}
}
return arr;
}
// Metadata, used in the Kernel, is generated from the input data
// Each CRR problem is split into 3 sub-problems to calculate
// each required option price separately
void PrepareKernelData(vector<CRRInParams> &in_params,
vector<CRRArrayEles> &array_params,
vector<CRRMeta> &in_buff_params,
vector<CRRPerStepMeta> &in_buff2_params,
const int n_crrs) {
constexpr short offset = 0;
for (int wi_idx = offset, dst = offset * 3; wi_idx < n_crrs; ++wi_idx) {
CRRInParams &src_crr_params = in_params[wi_idx];
CRRArrayEles &src_crr_eles = array_params[wi_idx];
for (int inner_func_index = 0; inner_func_index < 3;
++inner_func_index, ++dst) {
CRRMeta &dst_crr_meta = in_buff_params[dst];
CRRPerStepMeta &dst_crr_per_step_meta = in_buff2_params[dst];
dst_crr_meta.u = src_crr_params.u[inner_func_index];
dst_crr_meta.c1 = src_crr_params.c1[inner_func_index];
dst_crr_meta.c2 = src_crr_params.c2[inner_func_index];
dst_crr_meta.param_1 = src_crr_params.param_1[inner_func_index];
dst_crr_meta.param_2 = src_crr_params.param_2;
if (inner_func_index == 0) {
dst_crr_meta.n_steps = src_crr_params.n_steps + kOpt0;
} else {
dst_crr_meta.n_steps = src_crr_params.n_steps;
}
for (int i = 0; i <= kMaxNSteps2; ++i) {
dst_crr_per_step_meta.array_eles[i].u2 =
src_crr_eles.array_eles[i][inner_func_index].u2;
dst_crr_per_step_meta.array_eles[i].p1powu =
src_crr_eles.array_eles[i][inner_func_index].p1powu;
dst_crr_per_step_meta.array_eles[i].init_optval =
src_crr_eles.array_eles[i][inner_func_index].init_optval;
}
}
}
}
// Takes in the result from the kernel and stores the 3 option prices
// belonging to the same CRR problem in one InterRes element
void ProcessKernelResult(const vector<CRRResParams> &res_params,
vector<InterRes> &postp_buff, const int n_crrs) {
constexpr int offset = 0;
for (int wi_idx = offset, src = offset * 3; wi_idx < n_crrs; ++wi_idx) {
InterRes &dst_res = postp_buff[wi_idx];
for (int inner_func_index = 0; inner_func_index < 3;
++inner_func_index, ++src) {
const CRRResParams &src_res = res_params[src];
for (int i = 0; i < 4; ++i) {
if (inner_func_index == 0) {
dst_res.pgreek[i] = src_res.pgreek[i];
}
}
dst_res.vals[inner_func_index] = src_res.optval0;
}
}
}
// Computes the Premium and Greeks
OutputRes ComputeOutput(const InputData &inp, const CRRInParams &in_params,
const InterRes &res_params) {
double h;
OutputRes res;
h = inp.spot * (in_params.u2[0] - 1 / in_params.u2[0]);
res.value = res_params.pgreek[1];
res.delta = (res_params.pgreek[2] - res_params.pgreek[0]) / h;
res.gamma = 2 / h *
((res_params.pgreek[2] - res_params.pgreek[1]) / inp.spot /
(in_params.u2[0] - 1) -
(res_params.pgreek[1] - res_params.pgreek[0]) / inp.spot /
(1 - (1 / in_params.u2[0])));
res.theta =
(res_params.vals[0] - res_params.pgreek[3]) / 4 / inp.t * inp.n_steps;
res.rho = (res_params.vals[1] - res.value) / kEpsilon;
res.vega = (res_params.vals[2] - res.value) / kEpsilon;
return res;
}
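// In finite-difference terms (a summary of the computations above, not new
// math): delta and gamma are first and second differences over tree nodes
// spaced h = spot * (u^2 - 1/u^2) apart in the underlying; theta compares
// tree-[0] values four time steps apart; rho and vega are forward
// differences of the re-priced trees [1] and [2] with respect to the
// kEpsilon bumps applied in PrepareData.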
// Perform CRR solving using the CPU and compare FPGA results with CPU results
// to test correctness.
void TestCorrectness(int k, int n_crrs, bool &pass, const InputData &inp,
CRRInParams &vals, const OutputRes &fpga_res) {
if (k == 0) {
std::cout << "\n============= Correctness Test ============= \n";
std::cout << "Running analytical correctness checks... \n";
}
// This CRR benchmark ensures a minimum 4 decimal points match between FPGA and CPU
// "threshold" is chosen to enforce this guarantee
float threshold = 0.00001;
int i, j, q;
double x;
int n_steps = vals.n_steps;
int m = n_steps + kOpt0;
vector<double> pvalue(kMaxNSteps3);
vector<double> pvalue_1(kMaxNSteps1);
vector<double> pvalue_2(kMaxNSteps1);
vector<double> pgreek(5);
InterRes cpu_res_params;
OutputRes cpu_res;
// option value computed at each final node
x = vals.umin[0];
for (i = 0; i <= m; i++, x *= vals.u2[0]) {
pvalue[i] = sycl::fmax(inp.cp * (x - inp.strike), 0.0);
}
// backward recursion to evaluate option price
for (i = m - 1; i >= 0; i--) {
vals.umin[0] *= vals.u[0];
x = vals.umin[0];
for (j = 0; j <= i; j++, x *= vals.u2[0]) {
pvalue[j] = sycl::fmax(vals.c1[0] * pvalue[j] + vals.c2[0] * pvalue[j + 1],
inp.cp * (x - inp.strike));
}
if (i == 4) {
pgreek[4] = pvalue[2];
}
if (i == 2) {
for (q = 0; q <= 2; q++) {
pgreek[q + 1] = pvalue[q];
}
}
}
cpu_res_params.vals[0] = pvalue[0];
// the above computation is repeated for each option price
x = vals.umin[1];
for (i = 0; i <= n_steps; i++, x *= vals.u2[1]) {
pvalue_1[i] = sycl::fmax(inp.cp * (x - inp.strike), 0.0);
}
for (i = n_steps - 1; i >= 0; i--) {
vals.umin[1] *= vals.u[1];
x = vals.umin[1];
for (j = 0; j <= i; j++, x *= vals.u2[1]) {
pvalue_1[j] =
sycl::fmax(vals.c1[1] * pvalue_1[j] + vals.c2[1] * pvalue_1[j + 1],
inp.cp * (x - inp.strike));
}
}
cpu_res_params.vals[1] = pvalue_1[0];
x = vals.umin[2];
for (i = 0; i <= n_steps; i++, x *= vals.u2[2]) {
pvalue_2[i] = sycl::fmax(inp.cp * (x - inp.strike), 0.0);
}
for (i = n_steps - 1; i >= 0; i--) {
vals.umin[2] *= vals.u[2];
x = vals.umin[2];
for (j = 0; j <= i; j++, x *= vals.u2[2]) {
pvalue_2[j] =
sycl::fmax(vals.c1[2] * pvalue_2[j] + vals.c2[2] * pvalue_2[j + 1],
inp.cp * (x - inp.strike));
}
}
cpu_res_params.vals[2] = pvalue_2[0];
pgreek[0] = 0;
for (i = 1; i < 5; ++i) {
cpu_res_params.pgreek[i - 1] = pgreek[i];
}
cpu_res = ComputeOutput(inp, vals, cpu_res_params);
if (abs(cpu_res.value - fpga_res.value) > threshold) {
pass = false;
std::cout << "fpga_res.value " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.value << "\n";
std::cout << "cpu_res.value " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.value << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (abs(cpu_res.delta - fpga_res.delta) > threshold) {
pass = false;
std::cout << "fpga_res.delta " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.delta << "\n";
std::cout << "cpu_res.delta " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.delta << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (abs(cpu_res.gamma - fpga_res.gamma) > threshold) {
pass = false;
std::cout << "fpga_res.gamma " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.gamma << "\n";
std::cout << "cpu_res.gamma " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.gamma << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (abs(cpu_res.vega - fpga_res.vega) > threshold) {
pass = false;
std::cout << "fpga_res.vega " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.vega << "\n";
std::cout << "cpu_res.vega " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.vega << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (abs(cpu_res.theta - fpga_res.theta) > threshold) {
pass = false;
std::cout << "fpga_res.theta " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.theta << "\n";
std::cout << "cpu_res.theta " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.theta << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (abs(cpu_res.rho - fpga_res.rho) > threshold) {
pass = false;
std::cout << "fpga_res.rho " << k << " = " << std::fixed
<< std::setprecision(20) << fpga_res.rho << "\n";
std::cout << "cpu_res.rho " << k << " = " << std::fixed
<< std::setprecision(20) << cpu_res.rho << "\n";
std::cout << "Mismatch detected for value of crr " << k << "\n";
}
if (k == n_crrs - 1) {
std::cout << "CPU-FPGA Equivalence: " << (pass ? "PASS" : "FAIL") << "\n";
}
}
// Print out the achieved CRR throughput
void TestThroughput(const double &time, const int &n_crrs) {
std::cout << "\n============= Throughput Test =============\n";
std::cout << " Avg throughput: " << std::fixed << std::setprecision(1)
<< (n_crrs / time) << " assets/s\n";
}
int main(int argc, char *argv[]) {
string infilename = "";
string outfilename = "";
const string default_ifile = "src/data/ordered_inputs.csv";
const string default_ofile = "src/data/ordered_outputs.csv";
char str_buffer[kMaxStringLen] = {0};
for (int i = 1; i < argc; i++) {
if (argv[i][0] == '-') {
string sarg(argv[i]);
FindGetArgString(sarg, "-o=", str_buffer, kMaxStringLen);
FindGetArgString(sarg, "--output-file=", str_buffer, kMaxStringLen);
} else {
infilename = string(argv[i]);
}
}
try {
#if FPGA_SIMULATOR
auto selector = sycl::ext::intel::fpga_simulator_selector_v;
#elif FPGA_HARDWARE
auto selector = sycl::ext::intel::fpga_selector_v;
#else // #if FPGA_EMULATOR
auto selector = sycl::ext::intel::fpga_emulator_selector_v;
#endif
queue q(selector, fpga_tools::exception_handler);
device device = q.get_device();
std::cout << "Running on device: "
<< device.get_info<info::device::name>().c_str()
<< std::endl;
vector<InputData> inp;
    // Get the input file name; if the user does not supply a test input
    // file, the design falls back to the default input file
if (infilename == "") {
infilename = default_ifile;
}
ifstream inputFile(infilename);
if (!inputFile.is_open()) {
std::cerr << "Input file doesn't exist \n";
return 1;
}
// Check input file format
string filename = infilename;
std::size_t found = filename.find_last_of(".");
    if (filename.substr(found + 1) != "csv") {
std::cerr << "Input file format only support .csv\n";
return 1;
}
    // Get the output file name; if the user does not specify one, the
    // design uses the default output file
outfilename = default_ofile;
if (strlen(str_buffer)) {
outfilename = string(str_buffer);
}
// Check output file format
filename = outfilename;
found = filename.find_last_of(".");
    if (filename.substr(found + 1) != "csv") {
std::cerr << "Output file format only support .csv\n";
return 1;
}
    // Read the input data from the input file
ReadInputFromFile(inputFile, inp);
    // Determine the number of CRR problems to run from the input file.
    // Emulator mode processes only one input (or OUTER_UNROLL inputs) to
    // keep the runtime short
#if defined(FPGA_EMULATOR)
int temp_crrs = 1;
#else
int temp_crrs = inp.size();
#endif
    // Ensure the number of CRRs is at least OUTER_UNROLL (the kernel
    // processes OUTER_UNROLL CRRs in parallel)
if (OUTER_UNROLL >= temp_crrs) {
if (inp.size() < OUTER_UNROLL) {
std::cerr << "Input size must be greater than or equal to OUTER_UNROLL\n";
return 1;
} else {
temp_crrs = OUTER_UNROLL;
}
}
const int n_crrs = temp_crrs;
vector<CRRInParams> in_params(n_crrs);
vector<CRRArrayEles> array_params(n_crrs);
for (int j = 0; j < n_crrs; ++j) {
in_params[j] = PrepareData(inp[j]);
array_params[j] = PrepareArrData(in_params[j]);
}
    // The following vectors are arguments to CrrSolver
vector<CRRMeta> in_buff_params(n_crrs * 3);
vector<CRRPerStepMeta> in_buff2_params(n_crrs * 3);
// Prepare metadata as input to kernel
PrepareKernelData(in_params, array_params, in_buff_params, in_buff2_params,
n_crrs);
#ifdef FPGA_HARDWARE
    // Warm-up run to initialize the accelerator before the timed run
vector<CRRResParams> res_params_dummy(n_crrs * 3);
CrrSolver(n_crrs, in_buff_params, res_params_dummy, in_buff2_params,
q);
#endif
// Timed run - profile performance
vector<CRRResParams> res_params(n_crrs * 3);
double time = CrrSolver(n_crrs, in_buff_params, res_params,
in_buff2_params, q);
bool pass = true;
    // Post-processing step: process_res holds the intermediate values used
    // to compute the final results
vector<InterRes> process_res(n_crrs);
ProcessKernelResult(res_params, process_res, n_crrs);
vector<OutputRes> result(n_crrs);
for (int i = 0; i < n_crrs; ++i) {
result[i] = ComputeOutput(inp[i], in_params[i], process_res[i]);
TestCorrectness(i, n_crrs, pass, inp[i], in_params[i], result[i]);
}
    // Write the output data to the output file
ofstream outputFile(outfilename);
WriteOutputToFile(outputFile, result);
TestThroughput(time, n_crrs);
} catch (sycl::exception const &e) {
std::cerr << "Caught a synchronous SYCL exception: " << e.what() << "\n";
std::cerr << " If you are targeting an FPGA hardware, "
"ensure that your system is plugged to an FPGA board that is "
"set up correctly\n";
std::cerr << " If you are targeting the FPGA emulator, compile with "
"-DFPGA_EMULATOR\n";
return 1;
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/crr/src/CRR_common.hpp | // ==============================================================
// Copyright Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// This agreement shall be governed in all respects by the laws of the State of
// California and by the laws of the United States of America.
#ifndef __CRR_COMMON_H__
#define __CRR_COMMON_H__
constexpr int kMaxStringLen = 1024;
// kMaxNSteps is the maximum number of time steps; kMaxNSteps1/2/3 are
// kMaxNSteps + 1/2/3
constexpr size_t kMaxNSteps = 8189;
constexpr size_t kMaxNSteps1 = 8190;
constexpr size_t kMaxNSteps2 = 8191;
constexpr size_t kMaxNSteps3 = 8192;
// Increment by a small epsilon in order to compute derivative
// of option price with respect to Vol or Interest. The derivatives
// are then used to compute Vega and Rho.
constexpr double kEpsilon = 0.0001;
// Whenever calculations are made for Option Price 0, need to increment
// nsteps by 2 to ensure all the required derivative prices are calculated.
constexpr size_t kOpt0 = 2;
// Solver configuration settings that are dependent on selected
// board. Most notable settings are:
// OUTER_UNROLL controls the number of CRRs that can be processed
// in parallel in a SIMD fashion (number of CRRS must be >= OUTER_UNROLL).
// This is ideally a power of two, but does not have to be. Since
// the DRAM bandwidth requirement is low, increasing OUTER_UNROLL
// should result in fairly linear speedup.
// INNER_UNROLL controls the degree of parallelization within
// the calculation of a single CRR. This must be a power of two. Increasing
// INNER_UNROLL has a lower area overhead than increasing OUTER_UNROLL;
// however, there are diminishing returns as INNER_UNROLL is increased with
// respect to the number of time steps.
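//
// A minimal sketch (not from the original header) of fallback definitions;
// assumption: the real values normally come from board-specific build flags.
#ifndef OUTER_UNROLL
#define OUTER_UNROLL 1  // hypothetical default: process one CRR at a time
#endif
#ifndef INNER_UNROLL
#define INNER_UNROLL 2  // hypothetical default; must be a power of two
#endif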
// Data structure for original input data.
typedef struct {
int cp; /* cp = -1 or 1 for Put & Call respectively. */
double n_steps; /* n_steps = number of time steps in the binomial tree. */
double strike; /* strike = exercise price of option. */
double spot; /* spot = spot price of the underlying. */
double fwd; /* fwd = forward price of the underlying. */
double vol; /* vol = per cent volatility, input as a decimal. */
double df; /* df = discount factor to option expiry. */
double t; /* t = time in years to the maturity of the option. */
} InputData;
// Data structure as the inputs to FPGA.
// Element[i] is used to compute option_price[i].
typedef struct {
double n_steps; /* n_steps = number of time steps in the binomial tree. */
double u[3]; /* u = the increase factor of a up movement in the binomial tree,
same for each time step. */
double u2[3]; /* u2 = the square of increase factor. */
double c1[3]; /* c1 = the probability of a down movement in the binomial tree,
same for each time step. */
double c2[3]; /* c2 = the probability of a up movement in the binomial tree. */
double umin[3]; /* umin = minimum price of the underlying at the maturity. */
double param_1[3];/* param_1[i] = cp * umin[i] */
double param_2; /* param_2 = cp * strike */
} CRRInParams;
// Data structure as the output from ProcessKernelResult().
typedef struct {
double pgreek[4]; /* Stores the 4 derivative prices in the binomial tree
required to compute the Premium and Greeks. */
double vals[3]; /* Three option prices calculated */
} InterRes;
// Data structure for option price and five Greeks.
typedef struct {
double value; /* value = option price. */
double delta;
double gamma;
double vega;
double theta;
double rho;
} OutputRes;
// Data structures required by the kernel
typedef struct {
double u;
double c1;
double c2;
double param_1;
double param_2;
short n_steps;
short pad1;
int pad2;
double pad3;
double pad4;
} CRRMeta;
typedef struct {
double u2;
double p1powu;
double init_optval;
double pad;
} ArrayEle;
typedef struct {
ArrayEle array_eles[kMaxNSteps3][3]; /* Second dimension size set to 3 to have a
separate ArrayEle for each option price */
} CRRArrayEles;
typedef struct {
ArrayEle array_eles[kMaxNSteps3];
} CRRPerStepMeta;
typedef struct {
double pgreek[4];
double optval0;
double pad[3];
} CRRResParams;
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/StructuredGrids/iso3dfd_omp_offload/src/utils.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "../include/iso3dfd.h"
/*
* Host-Code
* Utility function to get input arguments
*/
void Usage(const std::string& programName) {
std::cout << "--------------------------------------\n";
std::cout << " Incorrect parameters \n";
std::cout << " Usage: ";
std::cout << programName
<< " n1 n2 n3 n1_block n2_block n3_block Iterations\n\n";
std::cout << " n1 n2 n3 : Grid sizes for the stencil\n";
std::cout << " n1_block n2_block n3_block : cache block sizes for CPU\n";
std::cout << " : TILE sizes for OMP Offload\n";
std::cout << " Iterations : No. of timesteps.\n";
std::cout << "--------------------------------------\n";
std::cout << "--------------------------------------\n";
}
/*
* Host-Code
* Function used for initialization
*/
void Initialize(float* ptr_prev, float* ptr_next, float* ptr_vel,
size_t n1, size_t n2, size_t n3) {
auto dim2 = n2 * n1;
for (auto i = 0; i < n3; i++) {
for (auto j = 0; j < n2; j++) {
auto offset = i * dim2 + j * n1;
for (auto k = 0; k < n1; k++) {
ptr_prev[offset + k] = 0.0f;
ptr_next[offset + k] = 0.0f;
ptr_vel[offset + k] =
            2250000.0f * dt * dt;  // v*v (1500^2) pre-multiplied by dt*dt
}
}
}
// Then we add a source
float val = 1.f;
for (auto s = 5; s >= 0; s--) {
for (auto i = n3 / 2 - s; i < n3 / 2 + s; i++) {
for (auto j = n2 / 4 - s; j < n2 / 4 + s; j++) {
auto offset = i * dim2 + j * n1;
for (auto k = n1 / 4 - s; k < n1 / 4 + s; k++) {
ptr_prev[offset + k] = val;
}
}
}
val *= 10;
}
}
/*
* Host-Code
* Utility function to print stats
*/
void PrintStats(double time, size_t n1, size_t n2, size_t n3,
size_t num_iterations) {
float throughput_mpoints = 0.0f, mflops = 0.0f, normalized_time = 0.0f;
double mbytes = 0.0f;
normalized_time = (double)time / num_iterations;
throughput_mpoints = ((n1 - 2 * kHalfLength) * (n2 - 2 * kHalfLength) *
(n3 - 2 * kHalfLength)) /
(normalized_time * 1e3f);
mflops = (7.0f * kHalfLength + 5.0f) * throughput_mpoints;
mbytes = 12.0f * throughput_mpoints;
std::cout << "--------------------------------------\n";
std::cout << "time : " << time / 1e3f << " secs\n";
std::cout << "throughput : " << throughput_mpoints << " Mpts/s\n";
std::cout << "flops : " << mflops / 1e3f << " GFlops\n";
std::cout << "bytes : " << mbytes / 1e3f << " GBytes/s\n";
std::cout << "\n--------------------------------------\n";
std::cout << "\n--------------------------------------\n";
}
/*
* Host-Code
* Utility function to calculate L2-norm between resulting buffer and reference
* buffer
*/
bool WithinEpsilon(float* output, float* reference, size_t dim_x,
size_t dim_y, size_t dim_z, size_t radius,
const int zadjust = 0, const float delta = 0.01f) {
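  // Note: despite the name, this function returns true when some interior
  // point differs by more than delta; that is, true means "error", matching
  // how the callers interpret the result.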
std::ofstream error_file;
error_file.open("error_diff.txt");
bool error = false;
double norm2 = 0;
for (auto iz = 0; iz < dim_z; iz++) {
for (auto iy = 0; iy < dim_y; iy++) {
for (auto ix = 0; ix < dim_x; ix++) {
if (ix >= radius && ix < (dim_x - radius) && iy >= radius &&
iy < (dim_y - radius) && iz >= radius &&
iz < (dim_z - radius + zadjust)) {
float difference = fabsf(*reference - *output);
norm2 += difference * difference;
if (difference > delta) {
error = true;
error_file << " ERROR: " << ix << ", " << iy << ", " << iz << " "
<< *output << " instead of " << *reference
<< " (|e|=" << difference << ")\n";
}
}
++output;
++reference;
}
}
}
error_file.close();
norm2 = sqrt(norm2);
if (error) std::cout << "error (Euclidean norm): " << norm2 << "\n";
return error;
}
/*
* Host-code
* Validate input arguments
*/
bool ValidateInput(size_t n1, size_t n2, size_t n3,
size_t n1_block, size_t n2_block,
size_t n3_block, size_t num_iterations) {
bool error = false;
if ((n1 < kHalfLength) || (n2 < kHalfLength) || (n3 < kHalfLength)) {
std::cout << "--------------------------------------\n";
std::cout << " Invalid grid size : n1, n2, n3 should be greater than "
<< kHalfLength << "\n";
error = true;
}
if ((n1_block <= 0) || (n2_block <= 0) || (n3_block <= 0)) {
std::cout << "--------------------------------------\n";
std::cout << " Invalid block sizes : n1_block, n2_block, n3_block "
"should be greater than 0\n";
error = true;
}
if (num_iterations <= 0) {
std::cout << "--------------------------------------\n";
std::cout
<< " Invalid num_iterations : Iterations should be greater than 0 \n";
error = true;
}
#if defined(USE_OPT1) || defined(USE_OPT2) || defined(USE_OPT3)
if ((n1_block * n2_block) > kMaxTeamSizeLimit) {
std::cout << "--------------------------------------\n";
std::cout << " Invalid block sizes : n1_block * n2_block "
"should be less than "
<< kMaxTeamSizeLimit << "\n";
error = true;
}
#endif
return error;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/StructuredGrids/iso3dfd_omp_offload/src/iso3dfd_verify.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "../include/iso3dfd.h"
/*
* Host-Code
* OpenMP implementation for single iteration of iso3dfd kernel.
* This function is used as reference implementation for verification and
* also to compare OpenMP performance on CPU with the OpenMP Offload version
*/
void Iso3dfdVerifyIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff, int n1, int n2,
int n3, size_t n1_block,
size_t n2_block, size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
#pragma omp parallel default(shared)
#pragma omp for schedule(static) collapse(3)
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = std::min(bz + n3_block, n3_end);
auto iy_end = std::min(by + n2_block, n2_end);
auto ix_end = std::min(n1_block, n1_end - bx);
for (auto iz = bz; iz < iz_end; iz++) {
for (auto iy = by; iy < iy_end; iy++) {
float *ptr_next = ptr_next_base + iz * dimn1n2 + iy * n1 + bx;
float *ptr_prev = ptr_prev_base + iz * dimn1n2 + iy * n1 + bx;
float *ptr_vel = ptr_vel_base + iz * dimn1n2 + iy * n1 + bx;
#pragma omp simd
for (auto ix = 0; ix < ix_end; ix++) {
float value = 0.0f;
value += ptr_prev[ix] * coeff[0];
value += STENCIL_LOOKUP(1);
value += STENCIL_LOOKUP(2);
value += STENCIL_LOOKUP(3);
value += STENCIL_LOOKUP(4);
value += STENCIL_LOOKUP(5);
value += STENCIL_LOOKUP(6);
value += STENCIL_LOOKUP(7);
value += STENCIL_LOOKUP(8);
ptr_next[ix] =
2.0f * ptr_prev[ix] - ptr_next[ix] + value * ptr_vel[ix];
}
}
} // end of inner iterations
}
}
} // end of cache blocking
}
/*
* Host-Code
* Driver function for ISO3DFD OpenMP CPU code
* Uses ptr_next and ptr_prev as ping-pong buffers to achieve
 * accelerated wave propagation
*/
void Iso3dfdVerify(float *ptr_next, float *ptr_prev, float *ptr_vel,
float *coeff, size_t n1, size_t n2,
size_t n3, size_t nreps, size_t n1_block,
size_t n2_block, size_t n3_block) {
for (auto it = 0; it < nreps; it += 1) {
Iso3dfdVerifyIteration(ptr_next, ptr_prev, ptr_vel, coeff, n1, n2, n3,
n1_block, n2_block, n3_block);
// here's where boundary conditions and halo exchanges happen
// Swap previous & next between iterations
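    // Rather than swapping pointers, the second call below exchanges the
    // roles of ptr_prev and ptr_next; each loop trip therefore advances two
    // time steps, which is why "it" is incremented again here.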
it++;
if (it < nreps)
Iso3dfdVerifyIteration(ptr_prev, ptr_next, ptr_vel, coeff, n1, n2, n3,
n1_block, n2_block, n3_block);
} // time loop
}
bool VerifyResults(float *next_base, float *prev_base, float *vel_base,
float *coeff, size_t n1, size_t n2,
size_t n3, size_t num_iterations,
size_t n1_block, size_t n2_block,
size_t n3_block) {
std::cout << "Checking Results ...\n";
size_t nsize = n1 * n2 * n3;
bool error = false;
float *temp = new float[nsize];
if (num_iterations % 2)
memcpy(temp, next_base, nsize * sizeof(float));
else
memcpy(temp, prev_base, nsize * sizeof(float));
Initialize(prev_base, next_base, vel_base, n1, n2, n3);
Iso3dfdVerify(next_base, prev_base, vel_base, coeff, n1, n2, n3,
num_iterations, n1_block, n2_block, n3_block);
if (num_iterations % 2)
error = WithinEpsilon(temp, next_base, n1, n2, n3, kHalfLength, 0, 0.1f);
else
error = WithinEpsilon(temp, prev_base, n1, n2, n3, kHalfLength, 0, 0.1f);
if (error) {
std::cout << "Final wavefields from OMP Offload device and CPU are not "
<< "equivalent: Fail\n";
} else {
std::cout << "Final wavefields from OMP Offload device and CPU are "
<< "equivalent: Success\n";
}
std::cout << "--------------------------------------\n";
delete[] temp;
return error;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/StructuredGrids/iso3dfd_omp_offload/src/iso3dfd.cpp |
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <string>
#include "../include/iso3dfd.h"
/*
* Inline device function to find minimum
*/
#pragma omp declare target
inline size_t GetMin(size_t first, size_t second) {
return ((first < second) ? first : second);
}
#pragma omp end declare target
#ifdef NO_OFFLOAD
/*
* Host-Code
* CPU implementation used to test Advisor offload modeling
* and compare OpenMP performance on CPU with the OpenMP Offload version
*/
void inline Iso3dfdIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t n1_block,
const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
  // The outer 3 loops execute just once when the block sizes are the same as
  // the grid sizes, which is enforced here to demonstrate the baseline version.
#pragma omp parallel default(shared)
#pragma omp for schedule(static) collapse(3)
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = GetMin(bz + n3_block, n3_end);
auto iy_end = GetMin(by + n2_block, n2_end);
auto ix_end = GetMin(bx + n1_block, n1_end);
for (auto iz = bz; iz < iz_end; iz++) {
for (auto iy = by; iy < iy_end; iy++) {
float *ptr_next = ptr_next_base + iz * dimn1n2 + iy * n1;
float *ptr_prev = ptr_prev_base + iz * dimn1n2 + iy * n1;
float *ptr_vel = ptr_vel_base + iz * dimn1n2 + iy * n1;
#pragma omp simd
for (auto ix = bx; ix < ix_end; ix++) {
float value = ptr_prev[ix] * coeff[0];
value += STENCIL_LOOKUP(1);
value += STENCIL_LOOKUP(2);
value += STENCIL_LOOKUP(3);
value += STENCIL_LOOKUP(4);
value += STENCIL_LOOKUP(5);
value += STENCIL_LOOKUP(6);
value += STENCIL_LOOKUP(7);
value += STENCIL_LOOKUP(8);
ptr_next[ix] =
2.0f * ptr_prev[ix] - ptr_next[ix] + value * ptr_vel[ix];
}
}
}
}
}
}
}
#endif
#ifdef USE_BASELINE
/*
* Device-Code
* OpenMP Offload implementation for single iteration of iso3dfd kernel.
* This function uses the default distribution of work
* It represents minimal changes to the CPU OpenMP code.
* Inner most loop order is changed from CPU OpenMP version to represent
* work-items in X-Y plane. And each work-item traverses the Z-plane
*/
void inline Iso3dfdIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t n1_block,
const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
  // The outer 3 loops execute just once when the block sizes are the same as
  // the grid sizes, which is enforced here to demonstrate the baseline version.
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = GetMin(bz + n3_block, n3_end);
auto iy_end = GetMin(by + n2_block, n2_end);
auto ix_end = GetMin(bx + n1_block, n1_end);
#pragma omp target parallel for simd collapse(3)
for (auto iz = bz; iz < iz_end; iz++) {
for (auto iy = by; iy < iy_end; iy++) {
for (auto ix = bx; ix < ix_end; ix++) {
float *ptr_next = ptr_next_base + iz * dimn1n2 + iy * n1;
float *ptr_prev = ptr_prev_base + iz * dimn1n2 + iy * n1;
float *ptr_vel = ptr_vel_base + iz * dimn1n2 + iy * n1;
float value = ptr_prev[ix] * coeff[0];
value += STENCIL_LOOKUP(1);
value += STENCIL_LOOKUP(2);
value += STENCIL_LOOKUP(3);
value += STENCIL_LOOKUP(4);
value += STENCIL_LOOKUP(5);
value += STENCIL_LOOKUP(6);
value += STENCIL_LOOKUP(7);
value += STENCIL_LOOKUP(8);
ptr_next[ix] =
2.0f * ptr_prev[ix] - ptr_next[ix] + value * ptr_vel[ix];
}
}
}
}
}
}
}
#endif
#ifdef USE_OPT1
/*
* Device-Code
* OpenMP Offload implementation for single iteration of iso3dfd kernel.
* This function uses the tiling approach for distribution of work
* It represents minimal changes to the CPU OpenMP code.
* OpenMP teams are created and distributed to work on a TILE
 * Innermost loop order is changed from the CPU OpenMP version so that
 * work-items lie in the X-Y plane, and each work-item traverses the Z-plane
*/
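// A reading of the pragmas below (interpretation, not original text): each
// OpenMP team owns one n1_block x n2_block tile, and each thread in a team
// owns one (ix, iy) column that it marches along z within the tile.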
void inline Iso3dfdIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t n1_block,
const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
#pragma omp target teams distribute collapse(3) \
num_teams((n3 / n3_block) * (n2 / n2_block) * (n1 / n1_block)) \
    thread_limit(n1_block * n2_block)
{ // start of omp target
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = GetMin(bz + n3_block, n3_end);
auto iy_end = GetMin(by + n2_block, n2_end);
auto ix_end = GetMin(bx + n1_block, n1_end);
#pragma omp parallel for simd collapse(2) schedule(static, 1)
for (auto iy = by; iy < iy_end; iy++) {
for (auto ix = bx; ix < ix_end; ix++) {
for (auto iz = bz; iz < iz_end; iz++) {
float *ptr_next = ptr_next_base + iz * dimn1n2 + iy * n1;
float *ptr_prev = ptr_prev_base + iz * dimn1n2 + iy * n1;
float *ptr_vel = ptr_vel_base + iz * dimn1n2 + iy * n1;
float value = ptr_prev[ix] * coeff[0];
value += STENCIL_LOOKUP(1);
value += STENCIL_LOOKUP(2);
value += STENCIL_LOOKUP(3);
value += STENCIL_LOOKUP(4);
value += STENCIL_LOOKUP(5);
value += STENCIL_LOOKUP(6);
value += STENCIL_LOOKUP(7);
value += STENCIL_LOOKUP(8);
ptr_next[ix] =
2.0f * ptr_prev[ix] - ptr_next[ix] + value * ptr_vel[ix];
}
}
}
}
}
}
} // end of omp target
}
#endif
#ifdef USE_OPT2
/*
* Device-Code
* OpenMP Offload implementation for single iteration of iso3dfd kernel.
* This function uses the tiling approach for distribution of work
* It represents minimal changes to the CPU OpenMP code.
* OpenMP teams are created and distributed to work on a TILE
 * Innermost loop order is changed from the CPU OpenMP version so that
 * work-items lie in the X-Y plane, and each work-item traverses the Z-plane.
 * In addition, the data along the outer-most z-dimension is stored locally
 * in the front and back registers for re-use
*/
void inline Iso3dfdIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t n1_block,
const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
#pragma omp target teams distribute collapse(3) \
num_teams((n3 / n3_block) * (n2 / n2_block) * (n1 / n1_block)) \
    thread_limit(n1_block * n2_block)
{ // start of omp target
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = GetMin(bz + n3_block, n3_end);
auto iy_end = GetMin(by + n2_block, n2_end);
auto ix_end = GetMin(bx + n1_block, n1_end);
#pragma omp parallel for simd collapse(2) schedule(static, 1)
for (auto iy = by; iy < iy_end; iy++) {
for (auto ix = bx; ix < ix_end; ix++) {
auto gid = ix + (iy * n1) + (bz * dimn1n2);
float front[kHalfLength + 1];
float back[kHalfLength];
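            // Sliding window along z: front[] holds the current point plus
            // the kHalfLength points ahead, back[] holds the kHalfLength
            // points behind, so each z step loads only one new value from
            // global memory.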
for (auto iter = 0; iter < kHalfLength; iter++) {
front[iter] = ptr_prev_base[gid + iter * dimn1n2];
}
for (auto iter = 1; iter <= kHalfLength; iter++) {
back[iter - 1] = ptr_prev_base[gid - iter * dimn1n2];
}
for (auto iz = bz; iz < iz_end; iz++) {
front[kHalfLength] = ptr_prev_base[gid + kHalfLength * dimn1n2];
float value = front[0] * coeff[0];
value += STENCIL_LOOKUP_Z(1);
value += STENCIL_LOOKUP_Z(2);
value += STENCIL_LOOKUP_Z(3);
value += STENCIL_LOOKUP_Z(4);
value += STENCIL_LOOKUP_Z(5);
value += STENCIL_LOOKUP_Z(6);
value += STENCIL_LOOKUP_Z(7);
value += STENCIL_LOOKUP_Z(8);
ptr_next_base[gid] = 2.0f * front[0] - ptr_next_base[gid] +
value * ptr_vel_base[gid];
gid += dimn1n2;
for (auto iter = kHalfLength - 1; iter > 0; iter--) {
back[iter] = back[iter - 1];
}
back[0] = front[0];
for (auto iter = 0; iter < kHalfLength; iter++) {
front[iter] = front[iter + 1];
}
}
}
}
}
}
}
} // end of omp target
}
#endif
#ifdef USE_OPT3
/*
* Device-Code
* OpenMP Offload implementation for single iteration of iso3dfd kernel.
* In this version the 3D-stencil is decomposed into smaller grids
* along the outer-most z-dimension. This results in multiple openmp CPU
* threads invoking omp target device kernels.
* This version also uses the tiling approach for distribution of work
* It represents minimal changes to the CPU OpenMP code.
* OpenMP teams are created and distributed to work on a TILE
 * Innermost loop order is changed from the CPU OpenMP version so that
 * work-items lie in the X-Y plane, and each work-item traverses the Z-plane.
 * In addition, the data along the outer-most z-dimension is stored locally
 * in the front and back registers for re-use
*/
void inline Iso3dfdIteration(float *ptr_next_base, float *ptr_prev_base,
float *ptr_vel_base, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t n1_block,
const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto n3_end = n3 - kHalfLength;
auto n2_end = n2 - kHalfLength;
auto n1_end = n1 - kHalfLength;
#pragma omp parallel for
for (auto bz = kHalfLength; bz < n3_end; bz += n3_block) {
#pragma omp target teams distribute collapse(2) \
num_teams((n2 / n2_block) * (n1 / n1_block)) \
    thread_limit(n1_block * n2_block)
for (auto by = kHalfLength; by < n2_end; by += n2_block) {
for (auto bx = kHalfLength; bx < n1_end; bx += n1_block) {
auto iz_end = GetMin(bz + n3_block, n3_end);
auto iy_end = GetMin(by + n2_block, n2_end);
auto ix_end = GetMin(bx + n1_block, n1_end);
#pragma omp parallel for simd collapse(2) schedule(static, 1)
for (auto iy = by; iy < iy_end; iy++) {
for (auto ix = bx; ix < ix_end; ix++) {
auto gid = ix + (iy * n1) + (bz * dimn1n2);
float front[kHalfLength + 1];
float back[kHalfLength];
for (auto iter = 0; iter < kHalfLength; iter++) {
front[iter] = ptr_prev_base[gid + iter * dimn1n2];
}
for (auto iter = 1; iter <= kHalfLength; iter++) {
back[iter - 1] = ptr_prev_base[gid - iter * dimn1n2];
}
for (auto iz = bz; iz < iz_end; iz++) {
front[kHalfLength] = ptr_prev_base[gid + kHalfLength * dimn1n2];
float value = front[0] * coeff[0];
value += STENCIL_LOOKUP_Z(1);
value += STENCIL_LOOKUP_Z(2);
value += STENCIL_LOOKUP_Z(3);
value += STENCIL_LOOKUP_Z(4);
value += STENCIL_LOOKUP_Z(5);
value += STENCIL_LOOKUP_Z(6);
value += STENCIL_LOOKUP_Z(7);
value += STENCIL_LOOKUP_Z(8);
ptr_next_base[gid] = 2.0f * front[0] - ptr_next_base[gid] +
value * ptr_vel_base[gid];
gid += dimn1n2;
for (auto iter = kHalfLength - 1; iter > 0; iter--) {
back[iter] = back[iter - 1];
}
back[0] = front[0];
for (auto iter = 0; iter < kHalfLength; iter++) {
front[iter] = front[iter + 1];
}
}
}
}
}
}
}
}
#endif
/*
* Host-Code
* Driver function for ISO3DFD OpenMP Offload code
* Uses ptr_next and ptr_prev as ping-pong buffers to achieve
 * accelerated wave propagation.
 * The OpenMP target data region is declared once and maintained across all
 * time steps
*/
void Iso3dfd(float *ptr_next, float *ptr_prev, float *ptr_vel, float *coeff,
const size_t n1, const size_t n2,
const size_t n3, const size_t nreps,
const size_t n1_block, const size_t n2_block,
const size_t n3_block) {
auto dimn1n2 = n1 * n2;
auto size = n3 * dimn1n2;
float *temp = NULL;
#pragma omp target data map(ptr_next [0:size], ptr_prev [0:size]) map( \
ptr_vel [0:size], coeff [0:9], n1, n2, n3, n1_block, n2_block, n3_block)
for (auto it = 0; it < nreps; it += 1) {
#ifdef USE_BASELINE
Iso3dfdIteration(ptr_next, ptr_prev, ptr_vel, coeff, n1, n2, n3, n1, n2,
n3);
#else
Iso3dfdIteration(ptr_next, ptr_prev, ptr_vel, coeff, n1, n2, n3, n1_block,
n2_block, n3_block);
#endif
// here's where boundary conditions and halo exchanges happen
temp = ptr_next;
ptr_next = ptr_prev;
ptr_prev = temp;
}
}
int main(int argc, char *argv[]) {
// Arrays used to update the wavefield
float *prev_base;
float *next_base;
// Array to store wave velocity
float *vel_base;
bool error = false;
size_t n1, n2, n3;
size_t n1_block, n2_block, n3_block;
size_t num_iterations;
try {
n1 = std::stoi(argv[1]) + (2 * kHalfLength);
n2 = std::stoi(argv[2]) + (2 * kHalfLength);
n3 = std::stoi(argv[3]) + (2 * kHalfLength);
n1_block = std::stoi(argv[4]);
n2_block = std::stoi(argv[5]);
n3_block = std::stoi(argv[6]);
num_iterations = std::stoi(argv[7]);
}
catch (...) {
Usage(argv[0]);
return 1;
}
if (ValidateInput(std::stoi(argv[1]), std::stoi(argv[2]), std::stoi(argv[3]),
n1_block, n2_block, n3_block, num_iterations)) {
Usage(argv[0]);
return 1;
}
// Check for available omp offload capable device
int num_devices = omp_get_num_devices();
if (num_devices <= 0) {
std::cout << "--------------------------------------\n";
std::cout << " No OpenMP Offload device found\n";
Usage(argv[0]);
return 1;
}
auto nsize = n1 * n2 * n3;
prev_base = new float[nsize];
next_base = new float[nsize];
vel_base = new float[nsize];
// Compute coefficients to be used in wavefield update
float coeff[kHalfLength + 1] = {-3.0548446, +1.7777778, -3.1111111e-1,
+7.572087e-2, -1.76767677e-2, +3.480962e-3,
-5.180005e-4, +5.074287e-5, -2.42812e-6};
  // Scale the coefficients by the squared grid spacing (dxyz * dxyz)
coeff[0] = (3.0f * coeff[0]) / (dxyz * dxyz);
for (auto i = 1; i <= kHalfLength; i++) {
coeff[i] = coeff[i] / (dxyz * dxyz);
}
Initialize(prev_base, next_base, vel_base, n1, n2, n3);
std::cout << "Grid Sizes: " << n1 - 2 * kHalfLength << " "
<< n2 - 2 * kHalfLength << " " << n3 - 2 * kHalfLength << "\n";
#if defined(USE_OPT1) || defined(USE_OPT2) || defined(USE_OPT3)
std::cout << "Tile sizes: " << n1_block << " " << n2_block << " " << n3_block
<< "\n";
#ifdef USE_OPT1
std::cout << "Using Optimized target code - version 1:\n";
std::cout << "--OMP_Offload with Tiling\n";
#elif USE_OPT2
std::cout << "Using Optimized target code - version 2:\n";
std::cout << "--OMP_Offload with Tiling and Z Window\n";
#elif USE_OPT3
std::cout << "Using Optimized target code - version 3:\n";
std::cout << "--OMP Threads + OMP_Offload with Tiling and Z Window\n";
#endif
#else
std::cout << "Tile sizes ignored for OMP Offload\n";
std::cout << "--Using Baseline version with omp target with collapse\n";
#endif
std::cout << "Memory Usage (MBytes): "
<< ((3 * nsize * sizeof(float)) / (1024 * 1024)) << "\n";
auto start = std::chrono::steady_clock::now();
Iso3dfd(next_base, prev_base, vel_base, coeff, n1, n2, n3, num_iterations,
n1_block, n2_block, n3_block);
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
.count();
PrintStats(time, n1, n2, n3, num_iterations);
#ifdef VERIFY_RESULTS
error = VerifyResults(next_base, prev_base, vel_base, coeff, n1, n2, n3,
num_iterations, n1_block, n2_block, n3_block);
#endif
delete[] prev_base;
delete[] next_base;
delete[] vel_base;
return error ? 1 : 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/StructuredGrids/iso3dfd_omp_offload/include/iso3dfd.h | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
constexpr float dt = 0.002f;
constexpr float dxyz = 50.0f;
constexpr size_t kHalfLength = 8;
constexpr size_t kMaxTeamSizeLimit = 256;
#define STENCIL_LOOKUP(ir) \
(coeff[ir] * ((ptr_prev[ix + ir] + ptr_prev[ix - ir]) + \
(ptr_prev[ix + ir * n1] + ptr_prev[ix - ir * n1]) + \
(ptr_prev[ix + ir * dimn1n2] + ptr_prev[ix - ir * dimn1n2])))
#define STENCIL_LOOKUP_Z(ir) \
(coeff[ir] * (front[ir] + back[ir - 1] + ptr_prev_base[gid + ir] + \
ptr_prev_base[gid - ir] + ptr_prev_base[gid + ir * n1] + \
ptr_prev_base[gid - ir * n1]))
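// STENCIL_LOOKUP accumulates the symmetric neighbor pair at radius ir along
// each of the three axes; STENCIL_LOOKUP_Z instead takes the z-axis pair from
// the front[]/back[] register window used by the optimized kernels.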
void Usage(const std::string& programName);
void PrintStats(double time, size_t n1, size_t n2, size_t n3,
size_t num_iterations);
bool WithinEpsilon(float* output, float* reference, size_t dim_x,
size_t dim_y, size_t dim_z, size_t radius,
const int zadjust, const float delta);
void Initialize(float* ptr_prev, float* ptr_next, float* ptr_vel,
size_t n1, size_t n2, size_t n3);
bool VerifyResults(float* next_base, float* prev_base, float* vel_base,
float* coeff, size_t n1, size_t n2,
size_t n3, size_t num_iterations,
size_t n1_block, size_t n2_block,
size_t n3_block);
bool ValidateInput(size_t n1, size_t n2, size_t n3,
size_t n1_block, size_t n2_block,
size_t n3_block, size_t num_iterations);
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/guided_matmul_opt_report/src/driver.c | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#define ROW 101
#define COL 101
#define COLBUF 0
#define COLWIDTH (COL+COLBUF)
#define REPEATNTIMES 1000000
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include "multiply.h"
/* routine to initialize an array with data */
void init_matrix(int row, int col, FTYPE off, FTYPE a[][COLWIDTH])
{
int i,j;
for (i=0; i< row;i++) {
for (j=0; j< col;j++) {
a[i][j] = fmod(i*j+off,10.0);
}
}
if (COLBUF>0)
for (i=0;i<row;i++)
for (j=col;j<COLWIDTH;j++)
a[i][j]=0.0;
}
void init_array(int length, FTYPE off, FTYPE a[])
{
int i;
for (i=0; i< length;i++)
a[i] = fmod(i+off,10.0);
if (COLBUF>0)
for (i=length;i<COLWIDTH;i++)
a[i]=0.0;
}
void printsum(int length, FTYPE ans[]) {
/* Doesn't print the whole matrix - Just a very simple Checksum */
int i;
double sum=0.0;
for (i=0;i<length;i++) sum+=ans[i];
printf("Sum of result = %f\n", sum);
}
double clock_it(void)
{
double duration = 0.0;
struct timeval start;
gettimeofday(&start, NULL);
duration = (double)(start.tv_sec + start.tv_usec/1000000.0);
return duration;
}
int main()
{
double execTime = 0.0;
double startTime, endTime;
int k, size1, size2;
FTYPE a[ROW][COLWIDTH];
FTYPE b[ROW];
FTYPE x[COLWIDTH];
size1 = ROW;
size2 = COLWIDTH;
printf("\nROW:%d COL: %d\n",ROW,COLWIDTH);
/* initialize the arrays with data */
init_matrix(ROW,COL,1,a);
init_array(COL,3,x);
/* start timing the matrix multiply code */
startTime = clock_it();
for (k = 0;k < REPEATNTIMES;k++) {
#ifdef NOFUNCCALL
int i, j;
for (i = 0; i < size1; i++) {
b[i] = 0;
for (j = 0;j < size2; j++) {
b[i] += a[i][j] * x[j];
}
}
#else
matvec(size1,size2,a,b,x);
#endif
x[0] = x[0] + 0.000001;
}
endTime = clock_it();
execTime = endTime - startTime;
printf("Execution time is %2.3f seconds\n", execTime);
printf("GigaFlops = %f\n", (((double)REPEATNTIMES * (double)COL * (double)ROW * 2.0) / (double)(execTime))/1000000000.0);
printsum(COL,b);
return 0;
}
| c |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/guided_matmul_opt_report/src/multiply.h | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef FTYPE
#define FTYPE double
#endif
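/* Note: matvec uses a C99 variably modified parameter: size2 must be
   declared before a so that the type FTYPE a[][size2] can refer to it. */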
void matvec(int size1, int size2, FTYPE a[][size2], FTYPE b[], FTYPE x[]);
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/guided_matmul_opt_report/src/multiply.c | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "multiply.h"
void matvec(int size1, int size2, FTYPE a[][size2], FTYPE b[], FTYPE x[])
{
int i, j;
for (i = 0; i < size1; i++) {
b[i] = 0;
for (j = 0;j < size2; j++) {
b[i] += a[i][j] * x[j];
}
}
}
| c |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/Intrinsics/src/intrin_dot_sample.cpp | /* [DESCRIPTION]
* This C code sample demonstrates how to use C, Intel(R) MMX(TM),
* Intel(R) Streaming SIMD Extensions 3 (Intel(R) SSE3),
* Intel(R) Advanced Vector Extensions (Intel(R) AVX), and
* Intel(R) Advanced Vector Extensions 2 (Intel(R) AVX2)
* intrinsics to calculate the dot product of two vectors.
*
* Do not run the sample on systems using processors that do
* not support Intel(R) MMX(TM), Intel(R) SSE3; the application
* will fail.
*
* [Output]
* Dot Product computed by C: 4324.000000
* Dot Product computed by Intel(R) SSE3 intrinsics: 4324.000000
* Dot Product computed by Intel(R) AVX intrinsics: 4324.000000
* Dot Product computed by Intel(R) AVX2 intrinsics: 4324.000000
* Dot Product computed by Intel(R) MMX(TM) intrinsics: 4324
*
*/
#include <immintrin.h>
#include <omp.h>
#include <pmmintrin.h>
#include <stdio.h>
#define SIZE 24 // assumes size is a multiple of 8 because
// Intel(R) AVX registers will store 8, 32bit elements.
// Computes dot product using C
float dot_product(float *a, float *b);
// Computes dot product using SIMD
float dot_product_SIMD(float *a, float *b);
// Computes dot product using Intel(R) SSE intrinsics
float dot_product_intrin(float *a, float *b);
// Computes dot product using Intel(R) AVX intrinsics
float AVX_dot_product(float *a, float *b);
float AVX2_dot_product(float *a, float *b);
// Computes dot product using Intel(R) MMX(TM) intrinsics
short MMX_dot_product(short *a, short *b);
#define MMX_DOT_PROD_ENABLED (__INTEL_COMPILER || (_MSC_VER && !_WIN64))
int main() {
float x[SIZE], y[SIZE];
short a[SIZE], b[SIZE];
int i;
float product;
short mmx_product;
for (i = 0; i < SIZE; i++) {
x[i] = i;
y[i] = i;
a[i] = i;
b[i] = i;
}
product = dot_product(x, y);
printf("Dot Product computed by C: %f\n", product);
product = dot_product_SIMD(x, y);
printf("Dot Product computed by C + SIMD: %f\n", product);
product = dot_product_intrin(x, y);
printf("Dot Product computed by Intel(R) SSE3 intrinsics: %f\n", product);
// The Visual Studio* editor will show the following section as disabled as it
// does not know that __INTEL_COMPILER is defined by the Intel (R) Compiler
#if __INTEL_COMPILER
if (_may_i_use_cpu_feature(_FEATURE_AVX2)) {
product = AVX2_dot_product(x, y);
printf("Dot Product computed by Intel(R) AVX2 intrinsics: %f\n", product);
} else
printf("Your Processor does not support Intel(R) AVX2 instrinsics.\n");
if (_may_i_use_cpu_feature(_FEATURE_AVX)) {
product = AVX_dot_product(x, y);
printf("Dot Product computed by Intel(R) AVX intrinsics: %f\n", product);
} else
printf("Your Processor does not support Intel(R) AVX intrinsics.\n");
#else
printf("Use Intel(R) Compiler to compute with Intel(R) AVX intrinsics\n");
#endif
#if MMX_DOT_PROD_ENABLED
mmx_product = MMX_dot_product(a, b);
_mm_empty();
printf("Dot Product computed by Intel(R) MMX(TM) intrinsics: %d\n",
mmx_product);
#else
printf(
"Use Intel(R) compiler in order to calculate dot product using Intel(R) "
"MMX(TM) intrinsics\n");
#endif
return 0;
}
float dot_product(float *a, float *b) {
int i;
  float sum = 0.0f;
for (i = 0; i < SIZE; i++) {
sum += a[i] * b[i];
}
return sum;
}
float dot_product_SIMD(float *a, float *b) {
int i;
  float sum = 0.0f;
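  // The simd pragma with a reduction clause lets the compiler vectorize the
  // loop while keeping the accumulation into sum well-defined across lanes.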
#pragma omp simd reduction(+ : sum)
for (i = 0; i < SIZE; i++) {
sum += a[i] * b[i];
}
return sum;
}
// The Visual Studio* editor will show the following section as disabled as it
// does not know that __INTEL_COMPILER is defined by the Intel(R) Compiler
#if __INTEL_COMPILER
float AVX2_dot_product(float *a, float *b) {
float total;
int i;
__m256 num1, num2, num3;
__m128 top, bot;
num3 = _mm256_setzero_ps(); // sets sum to zero
for (i = 0; i < SIZE; i += 8) {
num1 = _mm256_loadu_ps(a + i); // loads unaligned array a into num1
// num1= a[7] a[6] a[5] a[4] a[3] a[2] a[1] a[0]
num2 = _mm256_loadu_ps(b + i); // loads unaligned array b into num2
// num2= b[7] b[6] b[5] b[4] b[3] b[2] b[1] b[0]
num3 = _mm256_fmadd_ps(
num1, num2, num3); // performs multiplication and vertical addition
// num3 = a[7]*b[7]+num3[7] a[6]*b[6]+num3[6] a[5]*b[5]+num3[5]
// a[4]*b[4]+num3[4]
// a[3]*b[3]+num3[3] a[2]*b[2]+num3[2] a[1]*b[1]+num3[1]
// a[0]*b[0]+num3[0]
}
num3 = _mm256_hadd_ps(num3, num3); // performs horizontal addition
// For example, if num3 is filled with: 7 6 5 4 3 2 1 0
// then num3 = 13 9 13 9 5 1 5 1
// extracting the __m128 from the __m256 datatype
top = _mm256_extractf128_ps(num3, 1); // top = 13 9 13 9
bot = _mm256_extractf128_ps(num3, 0); // bot = 5 1 5 1
// completing the reduction
top = _mm_add_ps(top, bot); // top = 14 10 14 10
top = _mm_hadd_ps(top, top); // top = 24 24 24 24
_mm_store_ss(&total, top); // Storing the result in total
return total;
}
float AVX_dot_product(float *a, float *b) {
float total;
int i;
__m256 num1, num2, num3, num4;
__m128 top, bot;
num4 = _mm256_setzero_ps(); // sets sum to zero
for (i = 0; i < SIZE; i += 8) {
num1 = _mm256_loadu_ps(a + i); // loads unaligned array a into num1
// num1= a[7] a[6] a[5] a[4] a[3] a[2] a[1] a[0]
num2 = _mm256_loadu_ps(b + i); // loads unaligned array b into num2
// num2= b[7] b[6] b[5] b[4] b[3] b[2] b[1] b[0]
num3 = _mm256_mul_ps(num1, num2); // performs multiplication
// num3 = a[7]*b[7] a[6]*b[6] a[5]*b[5] a[4]*b[4] a[3]*b[3] a[2]*b[2]
// a[1]*b[1] a[0]*b[0]
num4 = _mm256_add_ps(num4, num3); // performs vertical addition
}
num4 = _mm256_hadd_ps(num4, num4); // performs horizontal addition
// For example, if num4 is filled with: 7 6 5 4 3 2 1 0
// then num4 = 13 9 13 9 5 1 5 1
// extracting the __m128 from the __m256 datatype
top = _mm256_extractf128_ps(num4, 1); // top = 13 9 13 9
bot = _mm256_extractf128_ps(num4, 0); // bot = 5 1 5 1
// completing the reduction
top = _mm_add_ps(top, bot); // top = 14 10 14 10
top = _mm_hadd_ps(top, top); // top = 24 24 24 24
_mm_store_ss(&total, top); // Storing the result in total
return total;
}
#endif
float dot_product_intrin(float *a, float *b) {
float total;
int i;
__m128 num1, num2, num3, num4;
__m128 num5;
num4 = _mm_setzero_ps(); // sets sum to zero
for (i = 0; i < SIZE; i += 4) {
num1 = _mm_loadu_ps(
a +
i); // loads unaligned array a into num1 num1= a[3] a[2] a[1] a[0]
num2 = _mm_loadu_ps(
b +
i); // loads unaligned array b into num2 num2= b[3] b[2] b[1] b[0]
num3 = _mm_mul_ps(num1, num2); // performs multiplication num3 =
// a[3]*b[3] a[2]*b[2] a[1]*b[1] a[0]*b[0]
num3 = _mm_hadd_ps(num3, num3); // performs horizontal addition
// num3= a[3]*b[3]+ a[2]*b[2] a[1]*b[1]+a[0]*b[0] a[3]*b[3]+ a[2]*b[2]
// a[1]*b[1]+a[0]*b[0]
num4 = _mm_add_ps(num4, num3); // performs vertical addition
}
num4 = _mm_hadd_ps(num4, num4);
_mm_store_ss(&total, num4);
return total;
}
// Intel(R) MMX(TM) technology cannot handle single precision floats
#if MMX_DOT_PROD_ENABLED
short MMX_dot_product(short *a, short *b) {
int i;
short result, data;
__m64 num3, sum;
__m64 *ptr1, *ptr2;
_m_empty();
sum = _mm_setzero_si64(); // sets sum to zero
for (i = 0; i < SIZE; i += 4) {
ptr1 = (__m64 *)&a[i]; // Converts array a to a pointer of type
//__m64 and stores four elements into
// Intel(R) MMX(TM) registers
ptr2 = (__m64 *)&b[i];
    num3 = _m_pmaddwd(*ptr1, *ptr2);  // multiplies pairs of 16-bit elements
                                      // and adds adjacent products, giving
                                      // two 32-bit partial sums
    sum = _m_paddd(sum, num3);        // accumulate with a dword add to match
                                      // the 32-bit results of pmaddwd
}
data = _m_to_int(sum); // converts __m64 data type to an int
sum = _m_psrlqi(sum, 32); // shifts sum
result = _m_to_int(sum);
result = result + data;
_mm_empty(); // clears the Intel(R) MMX(TM) registers and
// Intel(R) MMX(TM) state.
return result;
}
#endif
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/Intrinsics/src/intrin_double_sample.cpp | /* [DESCRIPTION]
* This C code sample demonstrates how to use C in
* comparison with
* Intel(R) Streaming SIMD Extensions 2 (Intel(R) SSE2),
* Intel(R) Streaming SIMD Extensions 3 (Intel(R) SSE3),
* Intel(R) Advanced Vector Extensions (Intel(R) AVX), and
* Intel(R) Advanced Vector Extensions 2 (Intel(R) AVX2)
* intrinsics to multiply two complex numbers.
*
* Do not run the sample on systems using processors that do
* not support Intel(R) SSE2, Intel(R) SSE3; the application
* will fail.
*
* [Output]
* Complex Product(C): 23.00+ -2.00i
* Complex Product(Intel(R) AVX2): 23.00+ -2.00i
* Complex Product(Intel(R) AVX): 23.00+ -2.00i
* Complex Product(Intel(R) SSE3): 23.00+ -2.00i
* Complex Product(Intel(R) SSE2): 23.00+ -2.00i
*
*/
#include <immintrin.h>
#include <pmmintrin.h>
#include <stdio.h>
typedef struct {
double real;
double img;
} complex_num;
// Multiplying complex numbers in C
void multiply_C(complex_num x, complex_num y, complex_num *z) {
z->real = (x.real * y.real) - (x.img * y.img);
z->img = (x.img * y.real) + (y.img * x.real);
}
// The Visual Studio* editor will show the following section as disabled as it
// does not know that __INTEL_COMPILER is defined by the Intel(R) Compiler
#if __INTEL_COMPILER
// Multiplying complex numbers using Intel(R) AVX2 intrinsics
void multiply_AVX2(complex_num x, complex_num y, complex_num *z) {
__m256d num1, num2, num3;
__m128d top, bot;
// initialize
// num1 = [x.real,-x.img,x.img,y.img]
num1 = _mm256_set_pd(x.real, -x.img, x.img, y.img);
// num2 = [y.real,y.img,y.real,x.real]
num2 = _mm256_set_pd(y.real, y.img, y.real, x.real);
// multiply the two
// num3 = [(x.real*y.real),(-x.img*y.img),(x.img*y.real),(y.img*x.real)]
num3 = _mm256_mul_pd(num1, num2);
// horizontally add
// num3 = [(x.real*y.real-x.img*y.img),(x.real*y.real-x.img*y.img),
// (x.img*y.real+y.img*x.real),(x.img*y.real+y.img*x.real)]
num3 = _mm256_hadd_pd(num3, num3);
// permute num3 so that we have what we need to store in the lower half
// num3 = [(x.real*y.real-x.img*y.img),(x.real*y.real-x.img*y.img),
// (x.img*y.real+y.img*x.real),(x.real*y.real-x.img*y.img)]
num3 = _mm256_permute4x64_pd(num3, 0b11100110);
// obtain the 128 bit part that we need to store
// bot = [(x.img*y.real+y.img*x.real),(x.real*y.real-x.img*y.img)]
bot = _mm256_extractf128_pd(num3, 0);
// store the result in z
_mm_storeu_pd((double *)z, bot);
}
// Multiplying complex numbers using Intel(R) AVX intrinsics
void multiply_AVX(complex_num x, complex_num y, complex_num *z) {
__m256d num1, num2, num3;
__m128d bot;
// initialize
// num1 = [x.real,-x.img,x.img,y.img]
num1 = _mm256_set_pd(x.real, -x.img, x.img, y.img);
// num2 = [y.real,y.img,y.real,x.real]
num2 = _mm256_set_pd(y.real, y.img, y.real, x.real);
// multiply the two
// num3 = [(x.real*y.real),(-x.img*y.img),(x.img*y.real),(y.img*x.real)]
num1 = _mm256_mul_pd(num1, num2);
// horizontally add
// num3 = [(x.real*y.real-x.img*y.img),(x.real*y.real-x.img*y.img),
// (x.img*y.real+y.img*x.real),(x.img*y.real+y.img*x.real)]
num1 = _mm256_hadd_pd(num1, num1);
// flip the 128 bit halves of num3 and store in num2
// num2 = [(x.img*y.real+y.img*x.real),(x.img*y.real+y.img*x.real),
// (x.real*y.real-x.img*y.img),(x.real*y.real-x.img*y.img)]
num2 = _mm256_permute2f128_pd(num1, num1, 1);
// blend num2 and num3 together so we get what we need to store
// num3 = [(x.real*y.real-x.img*y.img),(x.real*y.real-x.img*y.img),
// (x.img*y.real+y.img*x.real),(x.real*y.real-x.img*y.img)]
num1 = _mm256_blend_pd(num1, num2, 1);
// obtain the 128 bit part that we need to store
// bot = [(x.img*y.real+y.img*x.real),(x.real*y.real-x.img*y.img)]
bot = _mm256_extractf128_pd(num1, 0);
// store the result in z
_mm_storeu_pd((double *)z, bot);
}
#endif
// Multiplying complex numbers using Intel(R) SSE3 intrinsics
void multiply_SSE3(complex_num x, complex_num y, complex_num *z) {
__m128d num1, num2, num3;
// Duplicates lower vector element into upper vector element.
// num1: [x.real, x.real]
num1 = _mm_loaddup_pd(&x.real);
// Move y elements into a vector
// num2: [y.img, y.real]
num2 = _mm_set_pd(y.img, y.real);
// Multiplies vector elements
// num3: [(x.real*y.img), (x.real*y.real)]
num3 = _mm_mul_pd(num2, num1);
// num1: [x.img, x.img]
num1 = _mm_loaddup_pd(&x.img);
// Swaps the vector elements
// num2: [y.real, y.img]
num2 = _mm_shuffle_pd(num2, num2, 1);
// num2: [(x.img*y.real), (x.img*y.img)]
num2 = _mm_mul_pd(num2, num1);
// Adds upper vector element while subtracting lower vector element
// num3: [((x.real *y.img)+(x.img*y.real)),
// ((x.real*y.real)-(x.img*y.img))]
num3 = _mm_addsub_pd(num3, num2);
// Stores the elements of num3 into z
_mm_storeu_pd((double *)z, num3);
}
// Multiplying complex numbers using Intel(R) SSE2 intrinsics
void multiply_SSE2(complex_num x, complex_num y, complex_num *z)
{
__m128d num1, num2, num3, num4;
// Copies a single element into the vector
// num1: [x.real, x.real]
num1 = _mm_load1_pd(&x.real);
// Move y elements into a vector
// num2: [y.img, y.real]
num2 = _mm_set_pd(y.img, y.real);
// Multiplies vector elements
// num3: [(x.real*y.img), (x.real*y.real)]
num3 = _mm_mul_pd(num2, num1);
// num1: [x.img, x.img]
num1 = _mm_load1_pd(&x.img);
// Swaps the vector elements.
// num2: [y.real, y.img]
num2 = _mm_shuffle_pd(num2, num2, 1);
// num2: [(x.img*y.real), (x.img*y.img)]
num2 = _mm_mul_pd(num2, num1);
num4 = _mm_add_pd(num3, num2);
num3 = _mm_sub_pd(num3, num2);
num4 = _mm_shuffle_pd(num3, num4, 2);
// Stores the elements of num4 into z
_mm_storeu_pd((double *)z, num4);
}
int main()
{
complex_num a, b, c;
// Initialize complex numbers
a.real = 3;
a.img = 2;
b.real = 5;
b.img = -4;
// Output for each: 23.00+ -2.00i
multiply_C(a, b, &c);
printf("Complex Product(C): %2.2f+ %2.2fi\n", c.real, c.img);
#if __INTEL_COMPILER
if (_may_i_use_cpu_feature(_FEATURE_AVX2)) {
multiply_AVX2(a, b, &c);
printf("Complex Product(Intel(R) AVX2): %2.2f+ %2.2fi\n", c.real, c.img);
} else
printf("Your processor does not support Intel(R) AVX2 intrinsics.\n");
if (_may_i_use_cpu_feature(_FEATURE_AVX)) {
multiply_AVX(a, b, &c);
printf("Complex Product(Intel(R) AVX): %2.2f+ %2.2fi\n", c.real, c.img);
} else
printf("Your processor does not support AVX intrinsics.\n");
#endif
multiply_SSE3(a, b, &c);
printf("Complex Product(Intel(R) SSE3): %2.2f+ %2.2fi\n", c.real, c.img);
multiply_SSE2(a, b, &c);
printf("Complex Product(Intel(R) SSE2): %2.2f+ %2.2fi\n", c.real, c.img);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/Intrinsics/src/intrin_ftz_sample.cpp | /* [DESCRIPTION]
* This code sample demonstrates how to use the
* _MM_GET_FLUSH_ZERO_MODE() and _MM_GET_DENORMALS_ZERO_MODE()
* macros to read the FTZ and DAZ flags in the control register.
*
* [Compile]
* Windows*: icl
*
* Linux* and macOS*: icc -o <output file name>
*
* Turning off optimization changes the state of the registers.
* Windows*: icl /Od
*
* Linux* and macOS*: icc -O0
*
* [Output]
* Shows the state of the FTZ and DAZ registers.
*
*/
#include <pmmintrin.h>
#include <stdio.h>
#include <xmmintrin.h>
#pragma warning(disable : 4003)
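/* A minimal sketch (not part of the original sample): the companion SET
   macros from xmmintrin.h/pmmintrin.h write the same control-register bits
   that main() reads below. Unused here; shown for illustration only. */
static void set_ftz_daz_on(void) {
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);         /* enable flush-to-zero */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); /* enable denormals-are-zero */
}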
int main(void) {
/* Test the control register for flush to zero mode */
if (_MM_GET_FLUSH_ZERO_MODE())
printf("FTZ is set.\n");
else
printf("FTZ is not set.\n");
/* Test the control register for denormals mode */
if (_MM_GET_DENORMALS_ZERO_MODE())
printf("DAZ is set.\n");
else
printf("DAZ is not set.\n");
return (0);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/OpenMP_Offload_Features/src/user_defined_mapper.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
extern "C" int printf(const char *, ...);
struct MyArr {
int num;
int *arr;
};
#pragma omp declare mapper(id : MyArr c) map(c.num, c.arr [0:c.num])
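// The mapper named "id" tells the compiler how to map a MyArr: map the scalar
// c.num plus the array section c.arr[0:c.num] behind the pointer, so each
// map(mapper(id), ...) below deep-copies the struct automatically.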
void foo(int num, int *arr, int *arr_one) {
int i;
MyArr c;
c.num = num;
c.arr = arr;
for (i = 0; i < num; ++i)
printf("%s%3d %s", (i == 0 ? "In : " : ""), c.arr[i],
(i == num - 1 ? "\n" : ""));
#pragma omp target map(mapper(id), tofrom : c)
{
int j;
for (j = 0; j < c.num; ++j) c.arr[j] *= 2;
}
for (i = 0; i < num; ++i)
printf("%s%3d %s", (i == 0 ? "Out: " : ""), c.arr[i],
(i == num - 1 ? "\n" : ""));
MyArr c_one;
c_one.num = num;
c_one.arr = arr_one;
for (i = 0; i < num; ++i)
printf("%s%3d %s", (i == 0 ? "In : " : ""), c_one.arr[i],
(i == num - 1 ? "\n" : ""));
#pragma omp target map(mapper(id), tofrom : c_one)
{
int j;
for (j = 0; j < c_one.num; ++j) c_one.arr[j] *= 2;
}
for (i = 0; i < num; ++i)
printf("%s%3d %s", (i == 0 ? "Out: " : ""), c_one.arr[i],
(i == num - 1 ? "\n" : ""));
}
int main() {
int arr4[] = {1, 2, 4, 8};
int arr8[] = {1, 2, 4, 8, 16, 32, 64, 128};
int arr4_one[] = {1, 2, 4, 8};
int arr8_one[] = {1, 2, 4, 8, 16, 32, 64, 128};
foo(sizeof(arr4) / sizeof(arr4[0]), arr4, arr4_one);
foo(sizeof(arr8) / sizeof(arr8[0]), arr8, arr8_one);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/OpenMP_Offload_Features/src/usm_and_composability_with_dpcpp.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <sycl/sycl.hpp>
#include <algorithm>
#include <iostream>
using namespace sycl;
extern "C" void *omp_target_get_context(int);
#ifdef OCL_BACKEND
#pragma omp requires unified_shared_memory
#endif
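// With the OpenCL backend, unified shared memory must be required so that
// pointers allocated by either runtime stay valid in both OpenMP target
// regions and SYCL kernels without explicit mapping.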
int main() {
const unsigned kSize = 200;
int d = omp_get_default_device();
#ifdef OCL_BACKEND
sycl::queue q(
sycl::context(static_cast<cl_context>(omp_target_get_context(d))),
sycl::gpu_selector());
#else
sycl::queue q;
#endif
std::cout << "SYCL: Running on "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
if (!q.get_device()
.get_info<sycl::info::device::usm_shared_allocations>()) {
std::cout << "SYCL: USM is not available\n";
return 0;
}
auto Validate = [](int *data) {
for (unsigned i = 0; i < kSize; ++i)
if (data[i] != 100 + i) return "failed";
return "passed";
};
auto TestOmp = [&](int *data) {
std::fill_n(data, kSize, -1);
#pragma omp target parallel for device(d)
for (unsigned i = 0; i < kSize; ++i) {
data[i] = 100 + i;
}
return Validate(data);
};
auto TestDPCPP = [&](int *data) {
std::fill_n(data, kSize, -1);
q.parallel_for<class K>(sycl::range<1>(kSize), [=] (sycl::id<1> i)
{data[i] = 100 + i; }).wait();
return Validate(data);
};
int *omp_mem = (int *)omp_target_alloc_shared(kSize * sizeof(int), d);
int *dpcpp_mem = sycl::malloc_shared<int>(kSize, q);
std::cout << "SYCL and OMP memory: " << TestDPCPP(omp_mem) << "\n";
std::cout << "OMP and OMP memory: " << TestOmp(omp_mem) << "\n";
std::cout << "OMP and SYCL memory: " << TestOmp(dpcpp_mem) << "\n";
std::cout << "SYCL and SYCL memory: " << TestDPCPP(dpcpp_mem) << "\n";
omp_target_free(omp_mem, d);
sycl::free(dpcpp_mem, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/OpenMP_Offload_Features/src/class_member_functor.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <bits/stdc++.h>
using namespace std;
// A Functor
class Inc {
private:
int num;
public:
Inc(int n) : num(n) {}
int operator()(int arr_num) const { return num + arr_num; }
};
int main() {
int arr[] = {1, 2, 3, 4, 5};
int n = sizeof(arr) / sizeof(arr[0]);
int add5 = 5;
Inc a_inc(add5);
#pragma omp target teams distribute parallel for map(arr [0:n]) map(to : a_inc)
for (int k = 0; k < n; k++) {
arr[k] = arr[k] + a_inc(k);
}
for (int i = 0; i < n; i++) cout << arr[i] << " ";
cout << "\n"
<< "Done ......\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CompilerInfrastructure/OpenMP_Offload_Features/src/function_pointer.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <stdio.h>
#include <string.h>
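// declare target compiles foo for the device as well as the host; the kernel
// below re-assigns fptr on the device so the indirect call goes through a
// valid device-side function address.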
#pragma omp declare target
int foo(int y) {
printf("called from device, y = %d\n", y);
return y;
}
#pragma omp end declare target
int main() {
int x = 0;
int y = 100;
int (*fptr)(int) = foo;
#pragma omp target teams \
distribute parallel for \
firstprivate(y) reduction(+: x) map(to: fptr)
for (int k = 0; k < 16; k++) {
fptr = foo;
x = x + fptr(y + k);
}
printf("Output x = %d\n", x);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CombinationalLogic/MandelbrotOMP/src/main.cpp | //==============================================================
//
// Copyright 2020 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ===============================================================
// Initial conditions: rectangle (for image) = { (-2.5, -0.875), (1, 0.875) }
// height = 1024
// width = 2048
// max_depth = 100
//
// Finds the mandelbrot set given initial conditions, and saves results to a png
// image. The real portion of the complex number is the x-axis, and the
// imaginary portion is the y-axis
//
// This sample can also be compiled with GCC or MSVC, but then only the
// serial, scalar version is built and the OpenMP optimizations are unavailable
#include <emmintrin.h>
#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <cmath>
#include <complex>
#include "mandelbrot.hpp"
#include "timer.hpp"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
void write_image(const char* filename, int width, int height,
unsigned char* output) {
stbi_write_png(filename, width, height, 1, output, width);
}
int main(int argc, char* argv[]) {
double x0 = -2.5;
double y0 = -0.875;
double x1 = 1;
double y1 = 0.875;
// Modifiable parameters:
int height = 1024;
int width = 2048; // Width should be a multiple of 8
int max_depth = 100;
assert(width % 8 == 0);
#if !defined(__INTEL_COMPILER) && !defined(__INTEL_LLVM_COMPILER)
CUtilTimer timer;
printf(
"This example will check how many iterations of z_n+1 = z_n^2 + c a "
"complex set will remain bounded.\n");
#ifdef PERF_NUM
double avg_time = 0;
for (int i = 0; i < 5; ++i) {
#endif
printf("Starting serial, scalar Mandelbrot...\n");
timer.start();
unsigned char* output =
serial_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
printf("Calculation finished. Processing time was %.0fms\n",
timer.get_time() * 1000.0);
printf("Saving image...\n\n");
write_image("mandelbrot_serial.png", width, height, output);
_mm_free(output);
#ifdef PERF_NUM
avg_time += timer.get_time();
}
printf("avg time: %.0fms\n", avg_time * 1000.0 / 5);
#endif
#else
int option = 0;
#ifndef PERF_NUM
// Checks to see if option was given at command line
if (argc > 1) {
// Prints out instructions and quits
if (argv[1][0] == 'h') {
printf(
"This example will check how many iterations of z_n+1 = z_n^2 + c a "
"complex set will remain bounded. Pick which parallel method you "
"would like to use.\n");
printf(
"[0] all tests\n[1] serial/scalar\n[2] OpenMP SIMD\n[3] OpenMP "
"Parallel\n[4] OpenMP Both\n > ");
return 0;
} else {
option = atoi(argv[1]);
}
}
// If no options are given, prompt user to choose an option
else {
printf(
"This example will check how many iterations of z_n+1 = z_n^2 + c a "
"complex set will remain bounded. Pick which parallel method you would "
"like to use.\n");
printf(
"[0] all tests\n[1] serial/scalar\n[2] OpenMP SIMD\n[3] OpenMP "
"Parallel\n[4] OpenMP Both\n > ");
scanf("%i", &option);
}
#endif // !PERF_NUM
CUtilTimer timer;
double serial_time, omp_simd_time, omp_parallel_time, omp_both_time;
unsigned char* output;
switch (option) {
case 0: {
#ifdef PERF_NUM
double avg_time[4] = {0.0};
for (int i = 0; i < 5; ++i) {
#endif
printf("\nRunning all tests\n");
printf("\nStarting serial, scalar Mandelbrot...\n");
timer.start();
output = serial_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
serial_time = timer.get_time();
printf("Calculation finished. Processing time was %.0fms\n",
serial_time * 1000.0);
printf("Saving image as mandelbrot_serial.png\n");
write_image("mandelbrot_serial.png", width, height, output);
_mm_free(output);
printf("\nStarting OMP SIMD Mandelbrot...\n");
timer.start();
output = simd_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
omp_simd_time = timer.get_time();
printf("Calculation finished. Processing time was %.0fms\n",
omp_simd_time * 1000.0);
printf("Saving image as mandelbrot_simd.png\n");
write_image("mandelbrot_simd.png", width, height, output);
_mm_free(output);
printf("\nStarting OMP Parallel Mandelbrot...\n");
timer.start();
output = parallel_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
omp_parallel_time = timer.get_time();
printf("Calculation finished. Processing time was %.0fms\n",
omp_parallel_time * 1000.0);
printf("Saving image as mandelbrot_parallel.png\n");
write_image("mandelbrot_parallel.png", width, height, output);
_mm_free(output);
printf("\nStarting OMP SIMD + Parallel Mandelbrot...\n");
timer.start();
output = omp_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
omp_both_time = timer.get_time();
printf("Calculation finished. Processing time was %.0fms\n",
omp_both_time * 1000.0);
printf("Saving image as mandelbrot_simd_parallel.png\n");
write_image("mandelbrot_simd_parallel.png", width, height, output);
_mm_free(output);
#ifndef PERF_NUM
}
#endif
#ifdef PERF_NUM
avg_time[0] += serial_time;
avg_time[1] += omp_simd_time;
avg_time[2] += omp_parallel_time;
avg_time[3] += omp_both_time;
}
printf("\navg time (serial) : %.0fms\n",
avg_time[0] * 1000.0 / 5);
printf("avg time (simd) : %.0fms\n",
avg_time[1] * 1000.0 / 5);
printf("avg time (parallel) : %.0fms\n",
avg_time[2] * 1000.0 / 5);
printf("avg time (simd+parallel) : %.0fms\n\n",
avg_time[3] * 1000.0 / 5);
}
#endif
break;
case 1: {
printf("\nStarting serial, scalar Mandelbrot...\n");
timer.start();
output = serial_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
printf("Calculation finished. Processing time was %.0fms\n",
timer.get_time() * 1000.0);
printf("Saving image as mandelbrot_serial.png\n");
write_image("mandelbrot_serial.png", width, height, output);
_mm_free(output);
break;
}
case 2: {
printf("\nStarting OMP SIMD Mandelbrot...\n");
timer.start();
output = simd_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
printf("Calculation finished. Processing time was %.0fms\n",
timer.get_time() * 1000.0);
printf("Saving image as mandelbrot_simd.png\n");
write_image("mandelbrot_simd.png", width, height, output);
_mm_free(output);
break;
}
case 3: {
printf("\nStarting OMP Parallel Mandelbrot...\n");
timer.start();
output = parallel_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
printf("Calculation finished. Processing time was %.0fms\n",
timer.get_time() * 1000.0);
printf("Saving image as mandelbrot_parallel.png\n");
write_image("mandelbrot_parallel.png", width, height, output);
_mm_free(output);
break;
}
case 4: {
printf("\nStarting OMP Mandelbrot...\n");
timer.start();
output = omp_mandelbrot(x0, y0, x1, y1, width, height, max_depth);
timer.stop();
printf("Calculation finished. Processing time was %.0fms\n",
timer.get_time() * 1000.0);
printf("Saving image as mandelbrot_simd_parallel.png\n");
write_image("mandelbrot_simd_parallel.png", width, height, output);
_mm_free(output);
break;
}
default: {
printf("Please pick a valid option\n");
break;
}
}
#endif
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CombinationalLogic/MandelbrotOMP/src/timer.cpp | //==============================================================
//
// Copyright 2020 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ===============================================================
#include "timer.hpp"
#include <chrono>
using namespace std::chrono;
// Description:
// Registers the current time value in m_start_time using
// std::chrono::high_resolution_clock, which is portable across
// Windows*, Linux*, and OS X*
void CUtilTimer::start() { m_start_time = high_resolution_clock::now(); }
// Description:
// Registers the current time value in m_end_time using
// std::chrono::high_resolution_clock, which is portable across
// Windows*, Linux*, and OS X*
void CUtilTimer::stop() { m_end_time = high_resolution_clock::now(); }
// Description:
// Returns the number of seconds taken between start and stop
double CUtilTimer::get_time() {
duration<double> time_span =
duration_cast<duration<double> >(m_end_time - m_start_time);
return time_span.count();
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CombinationalLogic/MandelbrotOMP/src/mandelbrot.cpp | //==============================================================
//
// Copyright 2020 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ===============================================================
// Each of these methods calculates how deeply numbers on a complex plane remain
// in the Mandelbrot set. On top of the serial/scalar version, there is an
// OpenMP SIMD version, an OpenMP parallel version, and a combined
// OpenMP parallel/SIMD version
#include "mandelbrot.hpp"
#include <complex>
#if defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER)
#include <omp.h>
#endif
#include <emmintrin.h>
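// emmintrin.h provides _mm_malloc/_mm_free, used below for 64-byte-aligned
// output buffers (one cache line) that are friendly to vectorized stores.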
// Description:
// Determines how deeply points in the complex plane, spaced on a uniform grid,
// remain in the Mandelbrot set. The uniform grid is specified by the rectangle
// (x1, y1) - (x0, y0). Mandelbrot set is determined by remaining bounded after
// iteration of z_n+1 = z_n^2 + c, up to max_depth.
//
// Everything is done in a linear, scalar fashion
//
// [in]: x0, y0, x1, y1, width, height, max_depth
// [out]: output (caller must deallocate)
unsigned char* serial_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth) {
double xstep = (x1 - x0) / width;
double ystep = (y1 - y0) / height;
unsigned char* output = static_cast<unsigned char*>(
_mm_malloc(width * height * sizeof(unsigned char), 64));
// Traverse the sample space in equally spaced steps with width * height
// samples
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
double z_real = x0 + i * xstep;
double z_imaginary = y0 + j * ystep;
double c_real = z_real;
double c_imaginary = z_imaginary;
// depth should be an int, but the vectorizer will not vectorize,
// complaining about mixed data types switching it to double is worth the
// small cost in performance to let the vectorizer work
double depth = 0;
// Figures out how many recurrences are required before divergence, up to
// max_depth
while (depth < max_depth) {
if (z_real * z_real + z_imaginary * z_imaginary > 4.0) {
break; // Escape from a circle of radius 2
}
double temp_real = z_real * z_real - z_imaginary * z_imaginary;
double temp_imaginary = 2.0 * z_real * z_imaginary;
z_real = c_real + temp_real;
z_imaginary = c_imaginary + temp_imaginary;
++depth;
}
output[j * width + i] = static_cast<unsigned char>(
static_cast<double>(depth) / max_depth * 255);
}
}
return output;
}
#if defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER)
#define NUM_THREADS \
  8  // USER: Experiment with various thread counts for parallelization
// Description:
// Determines how deeply points in the complex plane, spaced on a uniform grid,
// remain in the Mandelbrot set. The uniform grid is specified by the rectangle
// (x1, y1) - (x0, y0). Mandelbrot set is determined by remaining bounded after
// iteration of z_n+1 = z_n^2 + c, up to max_depth.
//
// Optimized with OpenMP's SIMD constructs.
//
// [in]: x0, y0, x1, y1, width, height, max_depth
// [out]: output (caller must deallocate)
unsigned char* simd_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth) {
double xstep = (x1 - x0) / width;
double ystep = (y1 - y0) / height;
unsigned char* output = static_cast<unsigned char*>(
_mm_malloc(width * height * sizeof(unsigned char), 64));
// Traverse the sample space in equally spaced steps with width * height
// samples
for (int j = 0; j < height; ++j) {
#pragma omp simd // vectorize code
for (int i = 0; i < width; ++i) {
double z_real = x0 + i * xstep;
double z_imaginary = y0 + j * ystep;
double c_real = z_real;
double c_imaginary = z_imaginary;
// depth should be an int, but the vectorizer will not vectorize,
// complaining about mixed data types switching it to double is worth the
// small cost in performance to let the vectorizer work
double depth = 0;
// Figures out how many recurrences are required before divergence, up to
// max_depth
while (depth < max_depth) {
if (z_real * z_real + z_imaginary * z_imaginary > 4.0) {
break; // Escape from a circle of radius 2
}
double temp_real = z_real * z_real - z_imaginary * z_imaginary;
double temp_imaginary = 2.0 * z_real * z_imaginary;
z_real = c_real + temp_real;
z_imaginary = c_imaginary + temp_imaginary;
++depth;
}
output[j * width + i] = static_cast<unsigned char>(
static_cast<double>(depth) / max_depth * 255);
}
}
return output;
}
// Description:
// Determines how deeply points in the complex plane, spaced on a uniform grid,
// remain in the Mandelbrot set. The uniform grid is specified by the rectangle
// (x1, y1) - (x0, y0). Mandelbrot set is determined by remaining bounded after
// iteration of z_n+1 = z_n^2 + c, up to max_depth.
//
// Optimized with OpenMP's parallelization constructs.
//
// [in]: x0, y0, x1, y1, width, height, max_depth
// [out]: output (caller must deallocate)
unsigned char* parallel_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth) {
double xstep = (x1 - x0) / width;
double ystep = (y1 - y0) / height;
unsigned char* output = static_cast<unsigned char*>(
_mm_malloc(width * height * sizeof(unsigned char), 64));
omp_set_num_threads(NUM_THREADS);
// Traverse the sample space in equally spaced steps with width * height
// samples
#pragma omp parallel for schedule( \
dynamic, 1) // USER: Experiment with static/dynamic partitioning
// dynamic partitioning is advantageous as the while loop for calculating
// depth makes iterations vary in terms of time.
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
double z_real = x0 + i * xstep;
double z_imaginary = y0 + j * ystep;
double c_real = z_real;
double c_imaginary = z_imaginary;
// depth should be an int, but the vectorizer will not vectorize,
// complaining about mixed data types switching it to double is worth the
// small cost in performance to let the vectorizer work
double depth = 0;
// Figures out how many recurrences are required before divergence, up to
// max_depth
while (depth < max_depth) {
if (z_real * z_real + z_imaginary * z_imaginary > 4.0) {
break; // Escape from a circle of radius 2
}
double temp_real = z_real * z_real - z_imaginary * z_imaginary;
double temp_imaginary = 2.0 * z_real * z_imaginary;
z_real = c_real + temp_real;
z_imaginary = c_imaginary + temp_imaginary;
++depth;
}
output[j * width + i] = static_cast<unsigned char>(
static_cast<double>(depth) / max_depth * 255);
}
}
return output;
}
// Description:
// Determines how deeply points in the complex plane, spaced on a uniform grid,
// remain in the Mandelbrot set. The uniform grid is specified by the rectangle
// (x1, y1) - (x0, y0). Mandelbrot set is determined by remaining bounded after
// iteration of z_n+1 = z_n^2 + c, up to max_depth.
//
// Optimized with OpenMP's parallelization and SIMD constructs.
//
// [in]: x0, y0, x1, y1, width, height, max_depth
// [out]: output (caller must deallocate)
unsigned char* omp_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth) {
double xstep = (x1 - x0) / width;
double ystep = (y1 - y0) / height;
unsigned char* output = static_cast<unsigned char*>(
_mm_malloc(width * height * sizeof(unsigned char), 64));
omp_set_num_threads(NUM_THREADS);
// Traverse the sample space in equally spaced steps with width * height
// samples
#pragma omp parallel for schedule( \
dynamic, 1) // USER: Experiment with static/dynamic partitioning
// dynamic partitioning is advantageous as the while loop for calculating
// depth makes iterations vary in terms of time.
for (int j = 0; j < height; ++j) {
#pragma omp simd // vectorize code
for (int i = 0; i < width; ++i) {
double z_real = x0 + i * xstep;
double z_imaginary = y0 + j * ystep;
double c_real = z_real;
double c_imaginary = z_imaginary;
// depth should be an int, but the vectorizer will not vectorize,
// complaining about mixed data types switching it to double is worth the
// small cost in performance to let the vectorizer work
double depth = 0;
// Figures out how many recurrences are required before divergence, up to
// max_depth
while (depth < max_depth) {
if (z_real * z_real + z_imaginary * z_imaginary > 4.0) {
break; // Escape from a circle of radius 2
}
double temp_real = z_real * z_real - z_imaginary * z_imaginary;
double temp_imaginary = 2.0 * z_real * z_imaginary;
z_real = c_real + temp_real;
z_imaginary = c_imaginary + temp_imaginary;
++depth;
}
output[j * width + i] = static_cast<unsigned char>(
static_cast<double>(depth) / max_depth * 255);
}
}
return output;
}
#endif // __INTEL_COMPILER or __INTEL_LLVM_COMPILER
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CombinationalLogic/MandelbrotOMP/src/mandelbrot.hpp | //==============================================================
//
// Copyright 2020 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ===============================================================
#ifndef MANDELBROT_H
#define MANDELBROT_H
// Checks how many iterations of the complex quadratic polynomial z_n+1 = z_n^2
// + c keeps a set of complex numbers bounded, to a certain max depth. Mapping
// these depths onto the complex plane produces the telltale Mandelbrot set
// image. Uses strictly scalar methods to calculate the number of iterations (depth)
unsigned char* serial_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth);
// Checks how many iterations of the complex quadratic polynomial z_n+1 = z_n^2
// + c keeps a set of complex numbers bounded, to a certain max depth. Mapping
// these depths onto the complex plane produces the telltale Mandelbrot set
// image. Uses OpenMP SIMD for optimization
unsigned char* simd_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth);
// Checks how many iterations of the complex quadratic polynomial z_n+1 = z_n^2
// + c keeps a set of complex numbers bounded, to a certain max depth. Mapping
// these depths onto the complex plane produces the telltale Mandelbrot set
// image. Uses OpenMP Parallelization for optimization
unsigned char* parallel_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth);
// Checks how many iterations of the complex quadratic polynomial z_n+1 = z_n^2
// + c keeps a set of complex numbers bounded, to a certain max depth. Mapping
// these depths onto the complex plane produces the telltale Mandelbrot set
// image. Uses OpenMP SIMD + Parallelization for optimization
unsigned char* omp_mandelbrot(double x0, double y0, double x1, double y1,
int width, int height, int max_depth);
#endif // MANDELBROT_H
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/CombinationalLogic/MandelbrotOMP/src/timer.hpp | //==============================================================
//
// Copyright 2020 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ===============================================================
#ifndef TIMER_H
#define TIMER_H
#include <chrono>
using namespace std::chrono;
class CUtilTimer {
public:
  // Registers the current time value in m_start_time
void start();
  // Registers the current time value in m_end_time
void stop();
// Returns the number of seconds taken between start and stop
double get_time();
private:
// start and end time
high_resolution_clock::time_point m_start_time, m_end_time;
};
#endif // TIMER_H
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/intro/simple_solution.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <stdlib.h>
#include <iostream>
constexpr int N = 16;
int main() {
int is_cpu = true;
int *data = static_cast<int *>(malloc(N * sizeof(int)));
// Initialization
for (int i = 0; i < N; i++) data[i] = i;
// Add the target directive here, including the map clause.
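// map(from:) copies is_cpu back to the host when the region ends, while
// map(tofrom: data[0:N]) copies the N ints to the device on entry and back
// on exit.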
#pragma omp target map(from : is_cpu) map(tofrom : data [0:N])
{
is_cpu = omp_is_initial_device();
#pragma omp parallel for
for (int i = 0; i < N; i++) {
data[i] *= 2;
}
}
// Print Output
std::cout << "Running on " << (is_cpu ? "CPU" : "GPU") << "\n";
for (int i = 0; i < N; i++) std::cout << data[i] << "\n";
free(data);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/parallelism/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <math.h>
#include <omp.h>
#include <stdio.h>
constexpr int ARRAY_SIZE = 256;
constexpr int NUM_BLOCKS = 9;
int main(int argc, char *argv[]) {
int i, ib, is_cpu = 1, num_teams = 0;
double tstart, tstop;
float x[ARRAY_SIZE], y[ARRAY_SIZE];
float a = 1.0f;
float tolerance = 0.01f;
int correct_count = 0;
// Initialize some data
for (i = 0; i < ARRAY_SIZE; i++) {
x[i] = (float)i;
y[i] = (float)i;
}
tstart = omp_get_wtime();
#include "lab/saxpy_func_parallel.cpp"
tstop = omp_get_wtime();
printf("Number of OpenMP Devices Available: %d\n", omp_get_num_devices());
printf("Running on %s.\n", is_cpu ? "CPU" : "GPU");
printf("Work took %f seconds.\n", tstop - tstart);
printf("Number of Teams Created: %d\n", num_teams);
for (int i = 0; i < ARRAY_SIZE; i++)
    if (fabsf(y[i] - (a * i + i)) < tolerance)
correct_count++;
else {
printf("Incorrect Result at Element y[%d] : %f\n", i, y[i]);
printf("Expected: %f\n", a*i+i);
break;
}
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
} | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/parallelism/saxpy_func_parallel_solution.cpp | #pragma omp target map(from: is_cpu) map(from:num_teams) map(to:x[0:ARRAY_SIZE]) map(tofrom:y[0:ARRAY_SIZE])
{
// 1. Add pragma to create multiple master threads use clause num_teams(NUM_BLOCKS)
// and distribute loop iterations to the various master threads.
#pragma omp teams distribute num_teams(NUM_BLOCKS)
for (ib = 0; ib < ARRAY_SIZE; ib += NUM_BLOCKS) {
if (ib == 0) {
// Test if target is the CPU Host or the GPU Device
is_cpu = omp_is_initial_device();
// Query number of teams created
num_teams = omp_get_num_teams();
}
// 2. Place the combined pragma here to create a team of threads for each master thread
// Distribute iterations to those threads, and vectorize
#pragma omp parallel for simd
    // Clamp the upper bound: ARRAY_SIZE may not be a multiple of NUM_BLOCKS
    for (i = ib; i < ib + NUM_BLOCKS && i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
} | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/parallelism/main_test.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <math.h>
#include <omp.h>
#include <stdio.h>
constexpr int ARRAY_SIZE = 256;
constexpr int NUM_BLOCKS = 9;
int main(int argc, char *argv[]) {
int i, ib, is_cpu = 1, num_teams = 0;
double tstart, tstop;
float x[ARRAY_SIZE], y[ARRAY_SIZE];
float a = 1.0f;
float tolerance = 0.01f;
int correct_count = 0;
// Initialize some data
for (i = 0; i < ARRAY_SIZE; i++) {
x[i] = (float)i;
y[i] = (float)i;
}
tstart = omp_get_wtime();
#include "saxpy_func_parallel_solution.cpp"
tstop = omp_get_wtime();
printf("Number of OpenMP Devices Available: %d\n", omp_get_num_devices());
printf("Running on %s.\n", is_cpu ? "CPU" : "GPU");
printf("Work took %f seconds.\n", tstop - tstart);
printf("Number of Teams Created: %d\n", num_teams);
for (int i = 0; i < ARRAY_SIZE; i++)
    if (fabsf(y[i] - (a * i + i)) < tolerance)
correct_count++;
else {
printf("Incorrect Result at Element [%d] : %f\n", i, y[i]);
break;
}
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/USM/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <stdio.h>
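// requires unified_shared_memory asserts that pointers are directly usable
// inside target regions without map clauses; the shared allocations made in
// lab/alloc_func.cpp can then be touched by both host and device code.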
#pragma omp requires unified_shared_memory
constexpr int ARRAY_SIZE = 256;
void init1(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 1.0;
}
void init2(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 2.0;
}
int main() {
int deviceId = (omp_get_num_devices() > 0) ? omp_get_default_device()
: omp_get_initial_device();
#include "lab/alloc_func.cpp"
double tb, te;
int correct_count = 0;
init1(x, ARRAY_SIZE);
init1(y, ARRAY_SIZE);
printf("Number of OpenMP Devices: %d\n", omp_get_num_devices());
tb = omp_get_wtime();
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
init2(y, ARRAY_SIZE);
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
te = omp_get_wtime();
printf("Time of kernel: %lf seconds\n", te - tb);
for (int i = 0; i < ARRAY_SIZE; i++)
if (x[i] == 4.0) correct_count++;
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
omp_target_free(x, deviceId);
omp_target_free(y, deviceId);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/USM/alloc_func_solution.cpp | // Allocate Shared Memory
float *x =
(float *)omp_target_alloc_shared(ARRAY_SIZE * sizeof(float), deviceId);
float *y =
(float *)omp_target_alloc_shared(ARRAY_SIZE * sizeof(float), deviceId); | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/USM/usm_explicit.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#pragma omp requires unified_shared_memory
constexpr int ARRAY_SIZE = 256;
void init1(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 1.0;
}
void init2(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 2.0;
}
int main() {
int deviceId = (omp_get_num_devices() > 0) ? omp_get_default_device()
: omp_get_initial_device();
// Allocate memory on host
float *x = (float *)malloc(ARRAY_SIZE * sizeof(float));
float *y = (float *)malloc(ARRAY_SIZE * sizeof(float));
double tb, te;
int correct_count = 0;
init1(x, ARRAY_SIZE);
init1(y, ARRAY_SIZE);
printf("Number of OpenMP Devices: %d\n", omp_get_num_devices());
tb = omp_get_wtime();
// Allocate memory on device
float *x_dev =
(float *)omp_target_alloc_device(ARRAY_SIZE * sizeof(float), deviceId);
float *y_dev =
(float *)omp_target_alloc_device(ARRAY_SIZE * sizeof(float), deviceId);
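  // omp_target_memcpy(dst, src, length, dst_offset, src_offset,
  //                    dst_device_num, src_device_num) returns 0 on success.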
// Explicit data movement from Host to device
int error = omp_target_memcpy(x_dev, x, ARRAY_SIZE * sizeof(float), 0, 0,
deviceId, 0);
error = omp_target_memcpy(y_dev, y, ARRAY_SIZE * sizeof(float), 0, 0,
deviceId, 0);
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x_dev[i] += y_dev[i];
}
// Explicit Data Movement from Device to Host
error = omp_target_memcpy(x, x_dev, ARRAY_SIZE * sizeof(float), 0, 0, 0,
deviceId);
error = omp_target_memcpy(y, y_dev, ARRAY_SIZE * sizeof(float), 0, 0, 0,
deviceId);
init2(y, ARRAY_SIZE);
// Explicit data movement from Host to device
error = omp_target_memcpy(x_dev, x, ARRAY_SIZE * sizeof(float), 0, 0,
deviceId, 0);
error = omp_target_memcpy(y_dev, y, ARRAY_SIZE * sizeof(float), 0, 0,
deviceId, 0);
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x_dev[i] += y_dev[i];
}
// Explicit Data Movement from Device to Host
error = omp_target_memcpy(x, x_dev, ARRAY_SIZE * sizeof(float), 0, 0, 0,
deviceId);
error = omp_target_memcpy(y, y_dev, ARRAY_SIZE * sizeof(float), 0, 0, 0,
deviceId);
te = omp_get_wtime();
printf("Time of kernel: %lf seconds\n", te - tb);
for (int i = 0; i < ARRAY_SIZE; i++)
if (x[i] == 4.0) correct_count++;
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
omp_target_free(x_dev, deviceId);
omp_target_free(y_dev, deviceId);
free(x);
free(y);
return EXIT_SUCCESS;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <math.h>
#include <omp.h>
#include <stdio.h>
constexpr int ARRAY_SIZE = 256;
int main(int argc, char *argv[]) {
int i, j, is_cpu = true;
double tstart, tstop;
float x[ARRAY_SIZE], y[ARRAY_SIZE];
float a = 1.5f;
float tolerance = 0.01f;
int correct_count = 0;
// Initialize some data
for (i = 0; i < ARRAY_SIZE; i++) {
x[i] = (float)i;
y[i] = (float)i;
}
tstart = omp_get_wtime();
#include "lab/saxpy_func.cpp"
tstop = omp_get_wtime();
printf("Work took %f seconds\n", tstop - tstart);
printf("Running on %s.\n", is_cpu ? "CPU" : "GPU");
for (int i = 0; i < ARRAY_SIZE; i++)
    if (fabsf(y[i] - (a * i + i)) < tolerance)
      correct_count++;
    else {
      printf("Incorrect Result at Element [%d] : %f\n", i, y[i]);
break;
}
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/saxpy_func_solution.cpp | // Add the target pragma with the map clauses here
#pragma omp target map(tofrom : y) map(to : x) map(from : is_cpu)
{
is_cpu = omp_is_initial_device();
for (i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
} | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/main_data_region.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <stdio.h>
constexpr int ARRAY_SIZE = 256;
void init1(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 1.0;
}
void init2(float *x, int N) {
for (int i = 0; i < N; i++) x[i] = 2.0;
}
int main() {
float x[ARRAY_SIZE], y[ARRAY_SIZE];
double tb, te;
int correct_count = 0;
init1(x, ARRAY_SIZE);
init1(y, ARRAY_SIZE);
printf("Number of OpenMP Devices: %d\n", omp_get_num_devices());
tb = omp_get_wtime();
#include "lab/target_data_region.cpp"
te = omp_get_wtime();
printf("Time of kernel: %lf seconds\n", te - tb);
for (int i = 0; i < ARRAY_SIZE; i++)
if (x[i] == 4.0) correct_count++;
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/saxpy_func_solution_length.cpp | // Add the target pragma with the map clauses here
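// Array sections such as x[0:ARRAY_SIZE] state the transfer length
// explicitly; this is required for pointer arguments, and for these
// fixed-size arrays it documents the intended transfer size.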
#pragma omp target map(tofrom: y[0:ARRAY_SIZE]) map(to: x[0:ARRAY_SIZE]) map(from: is_cpu)
{
is_cpu = omp_is_initial_device();
for (i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/main_test.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <math.h>
#include <omp.h>
#include <stdio.h>
constexpr int ARRAY_SIZE = 256;
int main(int argc, char *argv[]) {
int i, j, is_cpu = true;
double tstart, tstop;
float x[ARRAY_SIZE], y[ARRAY_SIZE];
float a = 1.5f;
float tolerance = 0.01f;
int correct_count = 0;
// Initialize some data
for (i = 0; i < ARRAY_SIZE; i++) {
x[i] = (float)i;
y[i] = (float)i;
}
tstart = omp_get_wtime();
#include "saxpy_func_solution.cpp"
tstop = omp_get_wtime();
printf("Work took %f seconds\n", tstop - tstart);
printf("Running on %s.\n", is_cpu ? "CPU" : "GPU");
for (int i = 0; i < ARRAY_SIZE; i++)
    if (fabsf(y[i] - (a * i + i)) < tolerance)
      correct_count++;
    else {
      printf("Incorrect Result at Element [%d] : %f\n", i, y[i]);
break;
}
printf("Test: %s\n", (correct_count == ARRAY_SIZE) ? "PASSED!" : "Failed");
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/Jupyter/OpenMP-offload-training/datatransfer/target_data_region_solution.cpp | // Solution Using Target Data
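// The target data region below keeps x resident on the device across both
// kernels; only y, which is re-initialized on the host in between, is
// re-mapped by each nested target construct.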
#pragma omp target data map(tofrom : x)
{
#pragma omp target map(to : y)
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
init2(y, ARRAY_SIZE);
#pragma omp target map(to : y)
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
}
// Solution Using Target Enter/Exit/Update
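// enter data maps x and y to the device once up front, target update to(y)
// refreshes only the device copy of y after init2, and exit data copies the
// final x back to the host.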
#pragma omp target enter data map(to : x) map(to : y)
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
init2(y, ARRAY_SIZE);
#pragma omp target update to(y)
#pragma omp target
{
for (int i = 0; i < ARRAY_SIZE; i++) x[i] += y[i];
}
#pragma omp target exit data map(from : x) | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/ParallelPatterns/openmp_reduction/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iomanip> // setprecision library
#include <iostream>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities//include/dpc_common.hpp
#include "dpc_common.hpp"
// cpu_seq_calc_pi is a simple sequential CPU routine
// that calculates all the slices and then
// does a reduction.
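// It approximates pi as the integral of 4/(1+x^2) over [0,1] using the
// midpoint rule, sampling each slice at x = (i - 0.5) * step.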
float cpu_seq_calc_pi(int num_steps) {
float step = 1.0 / (float)num_steps;
float x;
float pi;
float sum = 0.0;
for (int i = 1; i < num_steps; i++) {
x = ((float)i - 0.5f) * step;
sum = sum + 4.0f / (1.0f + x * x);
}
pi = sum * step;
return pi;
}
// openmp_host_calc_pi is a simple parallel
// calculation that uses OpenMP running
// on the host. By default OpenMP
// will use all the cores available,
// execute the loop in parallel, and
// then perform a reduction.
float openmp_host_calc_pi(int num_steps) {
float step = (1.0f / num_steps);
float pi = 0.0;
float sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
for (int i = 1; i < num_steps; i++) {
float x = ((float)i - 0.5f) * step;
sum = sum + 4.0f / (1.0f + x * x);
}
pi = step * sum;
return pi;
}
// openmp_device_calc_pi is a simple parallel
// calculation that uses OpenMP running
// on the device through the use of the
// target specifier.
// This will execute the code in parallel.
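// The combined construct offloads the loop to the device, creates a league of
// teams, distributes the iterations, and performs the + reduction on sum
// across all device threads before the result is copied back.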
float openmp_device_calc_pi(int num_steps) {
float pi = 0.0;
float step = (1.0f / num_steps);
float sum = 0.0;
#pragma omp target teams distribute parallel for reduction(+ : sum)
for (int i = 1; i < num_steps; i++) {
float x = ((float)i - 0.5f) * step;
sum = sum + 4.0f / (1.0f + x * x);
}
pi = sum * step;
return pi;
}
int main(int argc, char** argv) {
int num_steps = 1000000;
printf("Number of steps is %d\n", num_steps);
float pi;
  // Due to the overhead associated with
  // JIT compilation, run the offload calculation
  // once so the kernel gets compiled. Execution
  // time is measured on the second run below.
pi = openmp_device_calc_pi(num_steps);
dpc_common::TimeInterval T;
pi = cpu_seq_calc_pi(num_steps);
auto stop = T.Elapsed();
std::cout << "Cpu Seq calc: \t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop << " seconds"
<< "\n";
dpc_common::TimeInterval T2;
pi = openmp_host_calc_pi(num_steps);
auto stop2 = T2.Elapsed();
std::cout << "Host OpenMP:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop2 << " seconds"
<< "\n";
dpc_common::TimeInterval T3;
pi = openmp_device_calc_pi(num_steps);
auto stop3 = T3.Elapsed();
std::cout << "Offload OpenMP:\t\t";
std::cout << std::setprecision(3) << "PI =" << pi;
std::cout << " in " << stop3 << " seconds"
<< "\n";
std::cout << "success\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++/GraphTraversal/MergesortOMP/src/merge_sort.cpp | #define _CRT_SECURE_NO_DEPRECATE
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <random>
constexpr int task_threshold = 5000;
constexpr int n = 100000000;
// Description:
// Initializes the array, and shuffle all elements in it.
//
// [in]: a Array to be initialized.
// n Length of array.
// [out]: None.
void InitializeArray(int a[], int n) {
  for (int i = 0; i < n; ++i) a[i] = i;
  printf("Shuffling the array\n");
  // std::random_shuffle was removed in C++17; std::shuffle is the portable way
  std::shuffle(a, a + n, std::mt19937(std::random_device{}()));
}
// Description:
// Checks that array is sorted and each element contains the index.
//
// [in]: a Array to be initialized.
// n Length of array.
// [out]: Return 0 if no error is found. Otherwise, return 1.
int CheckArray(int a[], int n) {
for (int i = 0; i < n - 1; ++i) {
if (a[i] >= a[i + 1] || a[i] != i) {
printf("Sort failed at location %d, a[i]=%d a[i+1]=%d\n", i, a[i],
a[i + 1]);
return 1;
}
}
// no error
return 0;
}
// Description:
// Merges two sublists of array
//
// [in]: a Array to be sorted.
// tmp_a Temporary array to contain sorted numbers.
// first Index of first element of first sublist to be merged in array
// a. middle Index of first element of second sublist to be merged in
// array a. last Index of last element of second sublist to be merged
// in array a.
// [out]: a
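// Classic two-pointer merge: repeatedly take the smaller head element of the
// two sorted halves into tmp_a until both halves are exhausted, then copy the
// merged range back into a.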
void Merge(int a[], int tmp_a[], int first, int middle, int last) {
// Merge two portions of array, and put result into a temporary array tmp_a[]
int p1 = first;
int p2 = middle;
int p = first;
while (p <= last) {
if (p1 < middle && (p2 > last || a[p1] < a[p2])) {
tmp_a[p++] = a[p1++];
} else {
tmp_a[p++] = a[p2++];
}
}
// Copy sorted portion from the temporary array to the original array
for (int i = first; i <= last; ++i) {
a[i] = tmp_a[i];
}
}
// Description:
// Sort the list starting from first to last.
// Use the Merge Sort algorithm, using recursive divide and conquer.
//
// [in]: a Array to be sorted.
// tmp_a Temporary array to contain sorted sublists.
// first Index of first element of the list to be sorted in array a.
// last Index of last element of the list to be sorted in array a.
// [out]: a
void MergeSort(int a[], int tmp_a[], int first, int last) {
if (first < last) {
int middle = (first + last + 1) / 2; // = first + (last - first + 1) / 2;
// Splits list a[first:last] into two halves (called sublists).
// One is [first:middle-1], and another is [middle:last].
MergeSort(a, tmp_a, first, middle - 1);
MergeSort(a, tmp_a, middle, last);
Merge(a, tmp_a, first, middle, last);
}
}
// Description:
// OpenMP Task version of merge_sort
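// Each call sorts the two halves as independent OpenMP tasks and waits on
// both before merging; sublists below task_threshold fall back to the serial
// sort so task-creation overhead does not swamp the useful work.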
void MergeSortOpenMP(int a[], int tmp_a[], int first, int last) {
if (first < last) {
int middle = (first + last + 1) / 2; // = first + (last - first + 1) / 2;
// Splits list a[first:last] into two halves (called sublists).
// One is [first:middle-1], and another is [middle:last].
// For sake of performance, only when the list is big enough,
// we create tasks with #pragma omp task.
if (last - first < task_threshold) {
MergeSort(a, tmp_a, first, middle - 1);
MergeSort(a, tmp_a, middle, last);
} else {
#pragma omp task
MergeSortOpenMP(a, tmp_a, first, middle - 1);
#pragma omp task
MergeSortOpenMP(a, tmp_a, middle, last);
#pragma omp taskwait
}
Merge(a, tmp_a, first, middle, last);
}
}
int main(int argc, char *argv[]) {
std::chrono::time_point<std::chrono::system_clock> start1, start2, end1, end2;
std::chrono::duration<double> elapsed_seconds_serial, elapsed_seconds_openmp;
printf("N = %d\n", n);
int *a = new int[n];
int *tmp_a = new int[n];
int option = 0;
// If PERF_NUM is defined, then no options taken...run all tests
#ifndef PERF_NUM
// Checks to see if option was given at command line
if (argc > 1) {
// Prints out instructions and quits
if (argv[1][0] == 'h') {
printf("Merge Sort Sample\n");
printf("[0] all tests\n[1] serial\n[2] OpenMP Task\n");
#ifdef _WIN32
system("PAUSE");
#endif // _WIN32
return 0;
}
// option is assumed an option
else {
option = atoi(argv[1]);
}
}
// If no options are given, prompt user to choose an option
else {
printf("Merge Sort Sample\n");
printf("[0] all tests\n[1] serial\n[2] OpenMP Task\n");
scanf("%i", &option);
}
#else  // PERF_NUM
  double avg_time[2] = {0.0, 0.0};
#endif // PERF_NUM
switch (option) {
case 0:
printf("\nRunning all tests\n");
#ifdef PERF_NUM
for (int i = 0; i < 5; ++i) {
#endif // PERF_NUM
printf("\nSerial version:\n");
InitializeArray(a, n);
printf("Sorting\n");
start1 = std::chrono::system_clock::now();
MergeSort(a, tmp_a, 0, n - 1);
end1 = std::chrono::system_clock::now();
elapsed_seconds_serial = end1 - start1;
// Confirm that a is sorted and that each element contains the index.
if (CheckArray(a, n)) {
delete[] tmp_a;
delete[] a;
return 1;
}
std::cout << "Sort succeeded in " << elapsed_seconds_serial.count()
<< " seconds.\n";
std::cout << "\nOpenMP Task Version:\n";
InitializeArray(a, n);
printf("Sorting\n");
start2 = std::chrono::system_clock::now();
#pragma omp parallel
{
#pragma omp single
{ MergeSortOpenMP(a, tmp_a, 0, n - 1); }
}
end2 = std::chrono::system_clock::now();
elapsed_seconds_openmp = end2 - start2;
// Confirm that a is sorted and that each element contains the index.
if (CheckArray(a, n)) {
delete[] tmp_a;
delete[] a;
return 1;
}
std::cout << "Sort succeeded in " << elapsed_seconds_openmp.count()
<< " seconds.\n";
#ifdef PERF_NUM
avg_time[0] += elapsed_seconds_serial.count();
avg_time[1] += elapsed_seconds_openmp.count();
}
printf("\n");
printf("avg time of serial version: %.0fms\n",
avg_time[0] * 1000.0 / 5);
printf("avg time of OpenMP Task version: %.0fms\n",
avg_time[1] * 1000.0 / 5);
#endif // PERF_NUM
break;
case 1:
printf("\nSerial version:\n");
InitializeArray(a, n);
printf("Sorting\n");
start1 = std::chrono::system_clock::now();
MergeSort(a, tmp_a, 0, n - 1);
end1 = std::chrono::system_clock::now();
elapsed_seconds_serial = end1 - start1;
// Confirm that a is sorted and that each element contains the index.
if (CheckArray(a, n)) {
delete[] tmp_a;
delete[] a;
return 1;
}
std::cout << "Sort succeeded in " << elapsed_seconds_serial.count()
<< " seconds.\n";
break;
case 2:
printf("\nOpenMP version:\n");
InitializeArray(a, n);
printf("Sorting\n");
start1 = std::chrono::system_clock::now();
#pragma omp parallel
{
#pragma omp single
{ MergeSortOpenMP(a, tmp_a, 0, n - 1); }
}
end1 = std::chrono::system_clock::now();
elapsed_seconds_openmp = end1 - start1;
// Confirm that a is sorted and that each element contains the index.
if (CheckArray(a, n)) {
delete[] tmp_a;
delete[] a;
return 1;
}
std::cout << "Sort succeeded in " << elapsed_seconds_openmp.count()
<< " seconds.\n";
break;
default:
printf("Please pick a valid option\n");
break;
}
delete[] tmp_a;
delete[] a;
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface; this is used if we don't want to use
// the CUT functions, but rather a self-contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
  //! time between the last start() and stop() calls is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (i.e. finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the total_time
//! summation variable. Also increment the number of completed clock sessions.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer that receives the new timer; NULL on failure
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if the timer has been deleted, otherwise false
//! @param timer_interface pointer to the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface pointer to the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface pointer to the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface pointer to the timer to reset
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface pointer to the timer to return the average time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface pointer to the timer to obtain the value of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
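////////////////////////////////////////////////////////////////////////////////
//! Illustrative sketch (added for documentation; not part of the original
//! helper API): the typical call sequence for the exported timer functions
//! above. The function name and the "work" placeholder are hypothetical.
////////////////////////////////////////////////////////////////////////////////
inline float exampleTimerUsageSketch() {
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);  // allocates a StopWatchWin or StopWatchLinux
  sdkStartTimer(&timer);   // records the start timestamp
  // ... the work to be measured would run here ...
  sdkStopTimer(&timer);    // adds the elapsed msec to the timer's total
  float total_ms = sdkGetTimerValue(&timer);       // total msec to date
  float avg_ms = sdkGetAverageTimerValue(&timer);  // msec per stopped run
  sdkDeleteTimer(&timer);  // frees the timer and NULLs the pointer
  (void)avg_ms;
  return total_ms;
}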
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
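// Illustrative note (added): for argv[i] == "--n=4", stringRemoveDelimiter()
// returns 2, so &argv[i][2] yields "n=4"; the parsers below rely on this to
// strip the leading '-' characters before matching flag names.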
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Parses the value of a numeric command line argument of the form
// "-<flag>=<value>" into the template type T
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
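//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): typical
// command line parsing with the functions above, e.g. for an invocation like
// "app -n=256 -scale=2.5 -file=input.pgm -verbose". All flag names here are
// hypothetical.
//////////////////////////////////////////////////////////////////////////////
inline void exampleCmdLineParsingSketch(const int argc, const char **argv) {
  int n = getCmdLineArgumentInt(argc, argv, "n");              // 256, or 0
  float scale = getCmdLineArgumentFloat(argc, argv, "scale");  // 2.5f, or 0.f
  char *file = NULL;
  if (getCmdLineArgumentString(argc, argv, "file", &file)) {
    printf("input file: %s\n", file);  // points into argv; do not free
  }
  bool verbose = checkCmdLineFlag(argc, argv, "verbose");
  (void)n; (void)scale; (void)verbose;
}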
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
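//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): locating a
// companion data file with sdkFindFilePath(). The file name is hypothetical;
// the returned buffer is malloc()ed by sdkFindFilePath() and must be freed.
//////////////////////////////////////////////////////////////////////////////
inline bool exampleFindFilePathSketch(const char *argv0) {
  char *path = sdkFindFilePath("ref_data.bin", argv0);
  if (path == NULL) {
    return false;  // not found in any of the search paths
  }
  printf("found: %s\n", path);
  free(path);
  return true;
}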
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a pass-through)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
  // the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
    // byte-wise copy; assumes T is a byte-sized type such as unsigned char
    unsigned char *ptr = reinterpret_cast<unsigned char *>(*data);
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
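//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): loading a
// PGM into normalized floats and writing it back out. The file names are
// hypothetical; sdkLoadPGM() allocates *data when it is passed in as NULL.
//////////////////////////////////////////////////////////////////////////////
inline bool examplePgmRoundTripSketch() {
  float *img = NULL;
  unsigned int w = 0, h = 0;
  if (!sdkLoadPGM<float>("input.pgm", &img, &w, &h)) {
    return false;
  }
  bool ok = sdkSavePGM<float>("output.pgm", img, w, h);  // back to 8-bit
  free(img);
  return ok;
}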
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
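//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): reading
// whitespace-separated float samples from a text file. The file name is
// hypothetical; *data is allocated by sdkReadFile() when passed in as NULL.
//////////////////////////////////////////////////////////////////////////////
inline bool exampleReadFileSketch() {
  float *signal = NULL;
  unsigned int len = 0;
  if (!sdkReadFile<float>("signal.txt", &signal, &len, false)) {
    return false;
  }
  printf("read %u samples\n", len);
  free(signal);
  return true;
}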
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold % of (# of bytes) allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
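//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): comparing a
// computed buffer against a reference with an absolute epsilon, allowing up
// to 1% of the elements to mismatch. Buffer names are hypothetical.
//////////////////////////////////////////////////////////////////////////////
inline bool exampleCompareSketch(const float *reference, const float *data,
                                 unsigned int len) {
  return compareDataAsFloatThreshold<float, float>(reference, data, len,
                                                   1e-3f, 0.01f);
}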
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      // fread() takes (ptr, element size, element count); match the float
      // variant below so fsize counts the elements actually read
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
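//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added; not part of the original helpers): dumping a
// result buffer to disk and comparing it against a reference dump located
// via the executable path. File names are hypothetical.
//////////////////////////////////////////////////////////////////////////////
inline bool exampleBinCompareSketch(float *result, unsigned int nelements,
                                    char *exec_path) {
  sdkDumpBin(result, static_cast<unsigned int>(nelements * sizeof(float)),
             "result.bin");
  return sdkCompareBin2BinFloat("result.bin", "ref_result.bin", nelements,
                                1e-3f, 0.05f, exec_path);
}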
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
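// Note (added): sdkCompareL2fe() above passes when the relative L2 error
//   ||reference - data||_2 / ||reference||_2 < epsilon,
// i.e. sqrtf(sum((ref[i] - data[i])^2)) / sqrtf(sum(ref[i]^2)) < epsilon.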
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
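////////////////////////////////////////////////////////////////////////////////
//! Illustrative sketch (added; not part of the original header): raising and
//! handling one of the convenience exceptions above. The condition checked
//! here is hypothetical.
////////////////////////////////////////////////////////////////////////////////
inline void exampleExceptionUsageSketch(bool file_exists) {
  try {
    if (!file_exists) {
      RUNTIME_EXCEPTION("input file does not exist");
    }
  } catch (const std::runtime_error &ex) {
    handleException(ex);  // prints ex.what() and calls exit(EXIT_FAILURE)
  }
}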
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not for
  // performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files;
// please refer to the CUDA examples for examples of the needed CUDA
// headers, which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:1: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
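// Typical usage (an illustrative sketch following the pattern used
// throughout these migrated samples): wrap a DPCT_CHECK_ERROR-guarded
// expression so failures carry file/line context, e.g.
//   checkCudaErrors(DPCT_CHECK_ERROR(
//       d_buf = sycl::malloc_device<float>(n, q)));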
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:2: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:4: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
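// ftoi rounds half away from zero; e.g. ftoi(2.5f) == 3 and ftoi(-2.5f) == -3.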
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
  // Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
  typedef struct dpct_type_175225 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
             // and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // so the sample still runs properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
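// Example of the 0xMm encoding: SM 8.6 is (8 << 4) + 6 == 0x86, so
// _ConvertSMVer2Cores(8, 6) returns 128 (Ampere) per the table above.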
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_319217 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // so the sample still runs properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:6: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:7: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:8: The "devID" device may be not the one intended for use. Adjust the
selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:11: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:12: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
    // If the GPU is integrated and is not running in Compute Mode prohibited,
    // then CUDA can map to the GLES resource
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:14: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_common.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUASIRANDOMGENERATOR_COMMON_H
#define QUASIRANDOMGENERATOR_COMMON_H
////////////////////////////////////////////////////////////////////////////////
// Global types and constants
////////////////////////////////////////////////////////////////////////////////
typedef long long int INT64;
#define QRNG_DIMENSIONS 2
#define QRNG_RESOLUTION 31
#define INT_SCALE (1.0f / (float)0x80000001U)
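// A 31-bit generator output lies in [0, 0x7FFFFFFF]; scaling (result + 1)
// by INT_SCALE therefore maps every sample into the open interval (0, 1).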
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_gold.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <math.h>
#include "quasirandomGenerator_common.h"
////////////////////////////////////////////////////////////////////////////////
// Table generation functions
////////////////////////////////////////////////////////////////////////////////
// Internal 64(63)-bit table
static INT64 cjn[63][QRNG_DIMENSIONS];
static int GeneratePolynomials(int buffer[QRNG_DIMENSIONS], bool primitive) {
int i, j, n, p1, p2, l;
int e_p1, e_p2, e_b;
// generate all polynomials to buffer
for (n = 1, buffer[0] = 0x2, p2 = 0, l = 0; n < QRNG_DIMENSIONS; ++n) {
// search for the next irreducible polynomial
for (p1 = buffer[n - 1] + 1;; ++p1) {
// find degree of polynomial p1
for (e_p1 = 30; (p1 & (1 << e_p1)) == 0; --e_p1) {
}
// try to divide p1 by all polynomials in buffer
for (i = 0; i < n; ++i) {
// find the degree of buffer[i]
for (e_b = e_p1; (buffer[i] & (1 << e_b)) == 0; --e_b) {
}
// divide p2 by buffer[i] until the end
for (p2 = (buffer[i] << ((e_p2 = e_p1) - e_b)) ^ p1; p2 >= buffer[i];
p2 = (buffer[i] << (e_p2 - e_b)) ^ p2) {
for (; (p2 & (1 << e_p2)) == 0; --e_p2) {
}
} // compute new degree of p2
// division without remainder!!! p1 is not irreducible
if (p2 == 0) {
break;
}
}
// all divisions were with remainder - p1 is irreducible
if (p2 != 0) {
e_p2 = 0;
if (primitive) {
        // check that p1 has only one cycle (i.e. is primitive)
j = ~(0xffffffff << (e_p1 + 1));
e_b = (1 << e_p1) | 0x1;
for (p2 = e_b, e_p2 = (1 << e_p1) - 2; e_p2 > 0; --e_p2) {
p2 <<= 1;
i = p2 & p1;
i = (i & 0x55555555) + ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
i = (i & 0x07070707) + ((i >> 4) & 0x07070707);
p2 |= (i % 255) & 1;
if ((p2 & j) == e_b) break;
}
}
      // the check passed - add p1 to the list of polynomials
if (e_p2 == 0) {
buffer[n] = p1;
l += e_p1;
break;
}
}
}
}
return l + 1;
}
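// Worked example of the bit encoding used above: buffer[0] = 0x2 is the
// polynomial x over GF(2), and 0xB (binary 1011) encodes x^3 + x + 1,
// which is irreducible (indeed primitive) over GF(2).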
////////////////////////////////////////////////////////////////////////////////
// @misc{Bratley92:LDS,
// author = "B. Fox and P. Bratley and H. Niederreiter",
// title = "Implementation and test of low discrepancy sequences",
// text = "B. L. Fox, P. Bratley, and H. Niederreiter. Implementation and
// test of
// low discrepancy sequences. ACM Trans. Model. Comput. Simul.,
// 2(3):195--213,
// July 1992.",
// year = "1992" }
////////////////////////////////////////////////////////////////////////////////
static void GenerateCJ() {
int buffer[QRNG_DIMENSIONS];
int *polynomials;
int n, p1, l, e_p1;
  // Niederreiter (in contrast to Sobol) allows the use of polynomials that
  // are merely irreducible, not necessarily primitive
l = GeneratePolynomials(buffer, false);
// convert all polynomials from buffer to polynomials table
polynomials = new int[l + 2 * QRNG_DIMENSIONS + 1];
for (n = 0, l = 0; n < QRNG_DIMENSIONS; ++n) {
// find degree of polynomial p1
for (p1 = buffer[n], e_p1 = 30; (p1 & (1 << e_p1)) == 0; --e_p1) {
}
// fill polynomials table with values for this polynomial
polynomials[l++] = 1;
for (--e_p1; e_p1 >= 0; --e_p1) {
polynomials[l++] = (p1 >> e_p1) & 1;
}
polynomials[l++] = -1;
}
polynomials[l] = -1;
// irreducible polynomial p
int *p = polynomials, e, d;
// polynomial b
int b_arr[1024], *b, m;
// v array
int v_arr[1024], *v;
// temporary polynomial, required to do multiplication of p and b
int t_arr[1024], *t;
// subsidiary variables
int i, j, u, m1, ip, it;
// cycle over monic irreducible polynomials
for (d = 0; p[0] != -1; p += e + 2) {
// allocate memory for cj array for dimension (ip + 1)
for (i = 0; i < 63; ++i) {
cjn[i][d] = 0;
}
// determine the power of irreducible polynomial
for (e = 0; p[e + 1] != -1; ++e) {
}
// polynomial b in the beginning is just '1'
(b = b_arr + 1023)[m = 0] = 1;
// v array needs only (63 + e - 2) length
v = v_arr + 1023 - (63 + e - 2);
// cycle over all coefficients
for (j = 63 - 1, u = e; j >= 0; --j, ++u) {
if (u == e) {
u = 0;
// multiply b by p (polynomials multiplication)
for (i = 0, t = t_arr + 1023 - (m1 = m); i <= m; ++i) {
t[i] = b[i];
}
b = b_arr + 1023 - (m += e);
for (i = 0; i <= m; ++i) {
b[i] = 0;
for (ip = e - (m - i), it = m1; ip <= e && it >= 0; ++ip, --it) {
if (ip >= 0) {
b[i] ^= p[ip] & t[it];
}
}
}
// multiplication of polynomials finished
// calculate v
for (i = 0; i < m1; ++i) {
v[i] = 0;
}
for (; i < m; ++i) {
v[i] = 1;
}
for (; i <= 63 + e - 2; ++i) {
v[i] = 0;
for (it = 1; it <= m; ++it) {
v[i] ^= v[i - it] & b[it];
}
}
}
// copy calculated v to cj
for (i = 0; i < 63; i++) {
cjn[i][d] |= (INT64)v[i + u] << j;
}
}
++d;
}
delete[] polynomials;
}
// Generate 63-bit quasirandom number for given index and dimension and
// normalize
extern "C" double getQuasirandomValue63(INT64 i, int dim) {
const double INT63_SCALE = (1.0 / (double)0x8000000000000001ULL);
INT64 result = 0;
for (int bit = 0; bit < 63; bit++, i >>= 1)
if (i & 1) result ^= cjn[bit][dim];
return (double)(result + 1) * INT63_SCALE;
}
////////////////////////////////////////////////////////////////////////////////
// Initialization (table setup)
////////////////////////////////////////////////////////////////////////////////
extern "C" void initQuasirandomGenerator(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION]) {
GenerateCJ();
for (int dim = 0; dim < QRNG_DIMENSIONS; dim++)
for (int bit = 0; bit < QRNG_RESOLUTION; bit++)
table[dim][bit] = (int)((cjn[bit][dim] >> 32) & 0x7FFFFFFF);
}
////////////////////////////////////////////////////////////////////////////////
// Generate 31-bit quasirandom number for given index and dimension
////////////////////////////////////////////////////////////////////////////////
extern "C" float getQuasirandomValue(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION], int i, int dim) {
int result = 0;
for (int bit = 0; bit < QRNG_RESOLUTION; bit++, i >>= 1)
if (i & 1) result ^= table[dim][bit];
return (float)(result + 1) * INT_SCALE;
}
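// Minimal host-side usage sketch (illustrative only; nothing in this sample
// calls the hypothetical helper below): build the 31-bit table once, then
// draw the first few quasirandom points of each dimension.
void exampleDrawQuasirandomPoints() {
  unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION];
  initQuasirandomGenerator(table);  // fill the table from the cj matrix
  for (int dim = 0; dim < QRNG_DIMENSIONS; dim++)
    for (int i = 0; i < 4; i++)
      printf("dim %d, sample %d: %f\n", dim, i,
             getQuasirandomValue(table, i, dim));
}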
////////////////////////////////////////////////////////////////////////////////
// Moro's Inverse Cumulative Normal Distribution function approximation
////////////////////////////////////////////////////////////////////////////////
extern "C" double MoroInvCNDcpu(unsigned int x) {
const double a1 = 2.50662823884;
const double a2 = -18.61500062529;
const double a3 = 41.39119773534;
const double a4 = -25.44106049637;
const double b1 = -8.4735109309;
const double b2 = 23.08336743743;
const double b3 = -21.06224101826;
const double b4 = 3.13082909833;
const double c1 = 0.337475482272615;
const double c2 = 0.976169019091719;
const double c3 = 0.160797971491821;
const double c4 = 2.76438810333863E-02;
const double c5 = 3.8405729373609E-03;
const double c6 = 3.951896511919E-04;
const double c7 = 3.21767881768E-05;
const double c8 = 2.888167364E-07;
const double c9 = 3.960315187E-07;
double z;
bool negate = false;
// Ensure the conversion to floating point will give a value in the
// range (0,0.5] by restricting the input to the bottom half of the
// input domain. We will later reflect the result if the input was
// originally in the top half of the input domain
if (x >= 0x80000000UL) {
x = 0xffffffffUL - x;
negate = true;
}
// x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff])
// Convert to floating point in (0,0.5]
const double x1 = 1.0 / static_cast<double>(0xffffffffUL);
const double x2 = x1 / 2.0;
double p1 = x * x1 + x2;
// Convert to floating point in (-0.5,0]
double p2 = p1 - 0.5;
// The input to the Moro inversion is p2 which is in the range
// (-0.5,0]. This means that our output will be the negative side
// of the bell curve (which we will reflect if "negate" is true).
// Main body of the bell curve for |p| < 0.42
if (p2 > -0.42) {
z = p2 * p2;
z = p2 * (((a4 * z + a3) * z + a2) * z + a1) /
((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0);
}
  // Special case (Chebyshev) for tail
else {
z = log(-log(p1));
z = -(c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z *
(c7 + z * (c8 + z * c9))))))));
}
// If the original input (x) was in the top half of the range, reflect
// to get the positive side of the bell curve
return negate ? -z : z;
}
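// Sanity check (illustrative): an input of 0x40000000 corresponds to
// p of approximately 0.25, so MoroInvCNDcpu(0x40000000) is approximately
// -0.6745, the 25th percentile of the standard normal distribution.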
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// CUDA Runtime
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
// Utilities and system includes
#include <helper_functions.h>
#include <helper_cuda.h>
using namespace sycl;
#include "quasirandomGenerator_common.h"
#include <cmath>
////////////////////////////////////////////////////////////////////////////////
// CPU code
////////////////////////////////////////////////////////////////////////////////
extern "C" void initQuasirandomGenerator(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION]);
extern "C" float getQuasirandomValue(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION], int i, int dim);
extern "C" double getQuasirandomValue63(INT64 i, int dim);
extern "C" double MoroInvCNDcpu(unsigned int p);
////////////////////////////////////////////////////////////////////////////////
// GPU code
////////////////////////////////////////////////////////////////////////////////
extern "C" void initTableGPU(
unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION], sycl::queue q_ct1);
extern "C" void quasirandomGeneratorGPU(float *d_Output, unsigned int seed,
unsigned int N, sycl::queue q_ct1);
extern "C" void inverseCNDgpu(float *d_Output, unsigned int *d_Input,
unsigned int N, sycl::queue q_ct1);
const int N = 1048576;
int main(int argc, char **argv) {
// Start logs
sycl::queue q_ct1 = sycl::queue(sycl::default_selector_v);
printf("%s Starting...\n\n", argv[0]);
std::cout << "\nRunning on " << q_ct1.get_device().get_info<info::device::name>()
<< "\n";
unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION];
float *h_OutputGPU, *d_Output;
int dim, pos;
double delta, ref, sumDelta, sumRef, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
if (sizeof(INT64) != 8) {
printf("sizeof(INT64) != 8\n");
return 0;
}
sdkCreateTimer(&hTimer);
printf("Allocating GPU memory...\n");
checkCudaErrors(
DPCT_CHECK_ERROR(d_Output = sycl::malloc_device<float>(
QRNG_DIMENSIONS * N, q_ct1)));
printf("Allocating CPU memory...\n");
h_OutputGPU = (float *)malloc(QRNG_DIMENSIONS * N * sizeof(float));
printf("Initializing QRNG tables...\n\n");
initQuasirandomGenerator(tableCPU);
initTableGPU(tableCPU, q_ct1);
printf("Testing QRNG...\n\n");
checkCudaErrors(DPCT_CHECK_ERROR(
q_ct1.memset(d_Output, 0, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
int numIterations = 20;
for (int i = -1; i < numIterations; i++) {
if (i == 0) {
checkCudaErrors(
DPCT_CHECK_ERROR(q_ct1.wait_and_throw()));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
}
quasirandomGeneratorGPU(d_Output, 0, N, q_ct1);
}
checkCudaErrors(
DPCT_CHECK_ERROR(q_ct1.wait_and_throw()));
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / (double)numIterations * 1e-3;
printf(
"quasirandomGenerator, Throughput = %.4f GNumbers/s, Time = %.5f s, Size "
"= %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
(double)QRNG_DIMENSIONS * (double)N * 1.0E-9 / gpuTime, gpuTime,
QRNG_DIMENSIONS * N, 1, 128 * QRNG_DIMENSIONS);
printf("\nReading GPU results...\n");
checkCudaErrors(DPCT_CHECK_ERROR(
q_ct1.memcpy(h_OutputGPU, d_Output, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
printf("Comparing to the CPU results...\n\n");
sumDelta = 0;
sumRef = 0;
for (dim = 0; dim < QRNG_DIMENSIONS; dim++)
for (pos = 0; pos < N; pos++) {
ref = getQuasirandomValue63(pos, dim);
delta = (double)h_OutputGPU[dim * N + pos] - ref;
sumDelta += fabs(delta);
sumRef += fabs(ref);
}
printf("L1 norm: %E\n", sumDelta / sumRef);
printf("\nTesting inverseCNDgpu()...\n\n");
checkCudaErrors(DPCT_CHECK_ERROR(
q_ct1.memset(d_Output, 0, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
for (int i = -1; i < numIterations; i++) {
if (i == 0) {
checkCudaErrors(
DPCT_CHECK_ERROR(q_ct1.wait_and_throw()));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
}
    inverseCNDgpu(d_Output, NULL, QRNG_DIMENSIONS * N, q_ct1);
}
checkCudaErrors(
DPCT_CHECK_ERROR(q_ct1.wait_and_throw()));
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / (double)numIterations * 1e-3;
printf(
"quasirandomGenerator-inverse, Throughput = %.4f GNumbers/s, Time = %.5f "
"s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
(double)QRNG_DIMENSIONS * (double)N * 1E-9 / gpuTime, gpuTime,
QRNG_DIMENSIONS * N, 1, 128);
printf("Reading GPU results...\n");
checkCudaErrors(DPCT_CHECK_ERROR(
q_ct1.memcpy(h_OutputGPU, d_Output, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
printf("\nComparing to the CPU results...\n");
sumDelta = 0;
sumRef = 0;
unsigned int distance = ((unsigned int)-1) / (QRNG_DIMENSIONS * N + 1);
for (pos = 0; pos < QRNG_DIMENSIONS * N; pos++) {
unsigned int d = (pos + 1) * distance;
ref = MoroInvCNDcpu(d);
delta = (double)h_OutputGPU[pos] - ref;
sumDelta += fabs(delta);
sumRef += fabs(ref);
}
printf("L1 norm: %E\n\n", L1norm = sumDelta / sumRef);
printf("Shutting down...\n");
sdkDeleteTimer(&hTimer);
free(h_OutputGPU);
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Output, q_ct1)));
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_kernel.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUASIRANDOMGENERATOR_KERNEL_CUH
#define QUASIRANDOMGENERATOR_KERNEL_CUH
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include "quasirandomGenerator_common.h"
using namespace sycl;
// Fast integer multiplication
#define MUL(a, b) sycl::mul24((unsigned int)a, (unsigned int)b)
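// sycl::mul24 multiplies only the low 24 bits of each operand, which is
// faster on some GPUs; it is safe in MUL's uses below because thread/block
// indices and ranges comfortably fit in 24 bits.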
////////////////////////////////////////////////////////////////////////////////
// Niederreiter quasirandom number generation kernel
////////////////////////////////////////////////////////////////////////////////
static dpct::constant_memory<unsigned int, 2> c_Table(QRNG_DIMENSIONS,
QRNG_RESOLUTION);
static void quasirandomGeneratorKernel(float *d_Output,
unsigned int seed,
unsigned int N,
const sycl::nd_item<3> &item_ct1,
dpct::accessor<unsigned int, dpct::constant, 2> c_Table) {
unsigned int *dimBase = &c_Table[item_ct1.get_local_id(1)][0];
unsigned int tid = MUL(item_ct1.get_local_range(2), item_ct1.get_group(2)) +
item_ct1.get_local_id(2);
unsigned int threadN =
MUL(item_ct1.get_local_range(2), item_ct1.get_group_range(2));
for (unsigned int pos = tid; pos < N; pos += threadN) {
unsigned int result = 0;
unsigned int data = seed + pos;
for (int bit = 0; bit < QRNG_RESOLUTION; bit++, data >>= 1)
if (data & 1) {
result ^= dimBase[bit];
}
d_Output[MUL(item_ct1.get_local_id(1), N) + pos] =
(float)(result + 1) * INT_SCALE;
}
}
// Table initialization routine
extern "C" void initTableGPU(
    unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION], sycl::queue q_ct1) {
checkCudaErrors(DPCT_CHECK_ERROR(
q_ct1.memcpy(c_Table.get_ptr(), tableCPU,
QRNG_DIMENSIONS * QRNG_RESOLUTION * sizeof(unsigned int))
.wait()));
}
// Host-side interface
extern "C" void quasirandomGeneratorGPU(float *d_Output, unsigned int seed,
unsigned int N, sycl::queue q_ct1) {
sycl::range<3> threads(1, QRNG_DIMENSIONS, 128);
q_ct1.submit([&](sycl::handler &cgh) {
c_Table.init();
auto c_Table_acc_ct1 = c_Table.get_access(cgh);
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 128) * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
quasirandomGeneratorKernel(d_Output, seed, N, item_ct1,
c_Table_acc_ct1);
});
});
getLastCudaError("quasirandomGeneratorKernel() execution failed.\n");
}
////////////////////////////////////////////////////////////////////////////////
// Moro's Inverse Cumulative Normal Distribution function approximation
////////////////////////////////////////////////////////////////////////////////
inline float MoroInvCNDgpu(unsigned int x) {
const float a1 = 2.50662823884f;
const float a2 = -18.61500062529f;
const float a3 = 41.39119773534f;
const float a4 = -25.44106049637f;
const float b1 = -8.4735109309f;
const float b2 = 23.08336743743f;
const float b3 = -21.06224101826f;
const float b4 = 3.13082909833f;
const float c1 = 0.337475482272615f;
const float c2 = 0.976169019091719f;
const float c3 = 0.160797971491821f;
const float c4 = 2.76438810333863E-02f;
const float c5 = 3.8405729373609E-03f;
const float c6 = 3.951896511919E-04f;
const float c7 = 3.21767881768E-05f;
const float c8 = 2.888167364E-07f;
const float c9 = 3.960315187E-07f;
float z;
bool negate = false;
// Ensure the conversion to floating point will give a value in the
// range (0,0.5] by restricting the input to the bottom half of the
// input domain. We will later reflect the result if the input was
// originally in the top half of the input domain
if (x >= 0x80000000UL) {
x = 0xffffffffUL - x;
negate = true;
}
// x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff])
// Convert to floating point in (0,0.5]
const float x1 = 1.0f / static_cast<float>(0xffffffffUL);
const float x2 = x1 / 2.0f;
float p1 = x * x1 + x2;
// Convert to floating point in (-0.5,0]
float p2 = p1 - 0.5f;
// The input to the Moro inversion is p2 which is in the range
// (-0.5,0]. This means that our output will be the negative side
// of the bell curve (which we will reflect if "negate" is true).
// Main body of the bell curve for |p| < 0.42
if (p2 > -0.42f) {
z = p2 * p2;
z = p2 * (((a4 * z + a3) * z + a2) * z + a1) /
((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0f);
}
  // Special case (Chebyshev) for tail
else {
z = sycl::log(-sycl::log(p1));
z = -(c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z
* (c8 + z * c9))))))));
}
// If the original input (x) was in the top half of the range, reflect
// to get the positive side of the bell curve
return negate ? -z : z;
}
////////////////////////////////////////////////////////////////////////////////
// Main kernel. Choose between transforming
// input sequence and uniform ascending (0, 1) sequence
////////////////////////////////////////////////////////////////////////////////
static void inverseCNDKernel(float *d_Output, unsigned int *d_Input,
unsigned int pathN,
const sycl::nd_item<3> &item_ct1) {
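  // Stride of 0xFFFFFFFF / (pathN + 1): placing samples at (pos + 1) *
  // distance spaces pathN inputs uniformly across (0, 2^32), avoiding
  // both endpoints.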
unsigned int distance = ((unsigned int)-1) / (pathN + 1);
unsigned int tid = MUL(item_ct1.get_local_range(2), item_ct1.get_group(2)) +
item_ct1.get_local_id(2);
unsigned int threadN =
MUL(item_ct1.get_local_range(2), item_ct1.get_group_range(2));
// Transform input number sequence if it's supplied
if (d_Input) {
for (unsigned int pos = tid; pos < pathN; pos += threadN) {
unsigned int d = d_Input[pos];
d_Output[pos] = (float)MoroInvCNDgpu(d);
}
}
  // Otherwise, generate uniformly spaced input samples on the fly
  // and write them to the destination
else {
for (unsigned int pos = tid; pos < pathN; pos += threadN) {
unsigned int d = (pos + 1) * distance;
d_Output[pos] = (float)MoroInvCNDgpu(d);
}
}
}
extern "C" void inverseCNDgpu(float *d_Output, unsigned int *d_Input,
unsigned int N, sycl::queue q_ct1) {
q_ct1.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 128) * sycl::range<3>(1, 1, 128),
sycl::range<3>(1, 1, 128)),
[=](sycl::nd_item<3> item_ct1) {
inverseCNDKernel(d_Output, d_Input, N, item_ct1);
});
getLastCudaError("inverseCNDKernel() execution failed.\n");
}
#endif
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/ccl.hpp>
#include <unordered_map>
#include <memory>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &
get_kvs(const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Help class to init ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get the stored kvs with \p addr if it exists. Otherwise, create a kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs>
create_kvs(const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr)
ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() {
delete _ccl_stream_ptr;
};
  /// Return the rank of the oneapi::ccl::communicator
  /// \returns The rank corresponding to the communicator object
int rank() const {
return _comm.rank();
}
  /// Retrieves the number of ranks in the oneapi::ccl::communicator
  /// \returns The number of ranks
int size() const {
return _comm.size();
}
  /// Return the underlying native device used by the oneapi::ccl::communicator
sycl::device get_device() const {
return _comm.get_device().get_native();
}
/// \brief allreduce is a collective communication operation that performs the global reduction operation
/// on values from all ranks of communicator and distributes the result back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the communicator
/// and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts data
/// from one rank of communicator (denoted as root) to all other ranks.
  /// Only in-place operation is supported.
  /// \param send_buf the buffer with @c count elements of @c dtype that stores
  /// the data to be broadcast
  /// \param recv_buf [out] the buffer to store the broadcast result
  /// \param count the number of elements of type @c dtype in @c buf
  /// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
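  // Example usage (a minimal sketch; `comm`, `q`, `buf` and `n` are
  // hypothetical). Since only the in-place operation is supported, the same
  // pointer is passed for both buffers:
  //
  //   comm.broadcast(buf, buf, n, oneapi::ccl::datatype::int32,
  //                  /*root=*/0, &q);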
/// \brief reduce_scatter is a collective communication operation that performs the global reduction operation
/// on values from all ranks of the communicator and scatters the result in blocks back to all ranks.
  /// \param sendbuff the buffer with @c count elements of @c dtype that stores local data to be reduced
  /// \param recvbuff [out] the buffer to store the reduced result; must hold @c recv_count elements of @c dtype
  /// \param recv_count the number of elements of type @c dtype in each receive block
  /// \param dtype the datatype of elements in @c sendbuff and @c recvbuff
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if(!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr),
_imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh)
{ cgh.host_task([=]
{
_imp->_ccl_event_impl.wait();
delete _imp; }); });
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T> class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints> struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T> struct DataType { using T2 = T; };
template <typename T> struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction).wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer to the destination location.
/// \param [in] from_ptr A pointer to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
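// Example usage (a minimal sketch; `src` and `dst` are hypothetical host
// buffers): copy a 3x2 column-major float sub-matrix whose source leading
// dimension is 4 and destination leading dimension is 8.
//
//   dpct::matrix_mem_copy(dst, src, /*to_ld=*/8, /*from_ld=*/4,
//                         /*rows=*/3, /*cols=*/2);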
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32)
return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
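// Example (a minimal sketch): the two casts above round-trip a double through
// its two 32-bit halves.
//
//   int hi = dpct::cast_double_to_int(d);          // high 32 bits
//   int lo = dpct::cast_double_to_int(d, false);   // low 32 bits
//   double d2 = dpct::cast_ints_to_double(hi, lo); // d2 == d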
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T> inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a)
return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
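// Example (a minimal sketch):
//
//   dpct::reverse_bits<std::uint8_t>(0b00000110u); // yields 0b01100000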
/// Performs a byte-level permutation, selecting 4 bytes from the 8-byte
/// concatenation of \p b (high) and \p a (low).
/// \param [in] a The first value containing 4 bytes
/// \param [in] b The second value containing 4 bytes
/// \param [in] s The selector value; only the lower 16 bits are used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
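// Example (a minimal sketch): each nibble of \p s picks a byte index 0-7 from
// the concatenation b:a (where \p a holds bytes 0-3). Selector 0x3210
// reproduces a, 0x7654 reproduces b, and 0x0123 reverses the bytes of a:
//
//   dpct::byte_level_permute(0x33221100u, 0x77665544u, 0x0123); // 0x00112233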
/// Find position of first least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T> inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
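// Example (a minimal sketch):
//
//   dpct::ffs(0);      // 0 (no set bit)
//   dpct::ffs(1);      // 1
//   dpct::ffs(0b1000); // 4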
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in logical sub_group gets value from another work-item
/// whose id is \p remote_local_id. If \p remote_local_id is outside the
/// logical sub_group id range, \p remote_local_id will modulo with \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is caller's id adds \p delta. If calculated id is outside the logical
/// sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical_sub_group gets value from another work-item whose
/// id is caller's id subtracts \p delta. If calculated id is outside the
/// logical sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is bitwise exclusive OR of the caller's id and \p mask. If calculated id
/// is outside the logical sub_group id range, the work-item will get value from
/// itself. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
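// Example (a minimal sketch, inside a kernel): a butterfly reduction that
// leaves the sub-group sum in every work-item; `sg` is the current
// sycl::sub_group and `v` a per-work-item float.
//
//   for (unsigned m = 16; m > 0; m >>= 1)
//     v += dpct::permute_sub_group_by_xor(sg, v, m);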
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask,
sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
  throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
                        "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
  throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left not "
                        "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
  throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right not "
                        "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked
/// sub-group operation. The parameter member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates in the call. All work-items named in member_mask must
/// execute with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
  throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor not "
                        "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The sycl::nd_item for the current work-item.
/// \param [in] counter: An atomic object defined on device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void
nd_range_barrier(const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The sycl::nd_item for the current work-item.
/// \param [in] counter: An atomic object defined on device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void
nd_range_barrier(const sycl::nd_item<1> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
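// Example (a minimal sketch, inside a kernel whose work-groups are all
// resident on the device at once): `sync_ptr` is a hypothetical pointer to a
// zero-initialized unsigned int in device global memory.
//
//   sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
//                    sycl::memory_scope::device,
//                    sycl::access::address_space::global_space>
//       counter(*sync_ptr);
//   dpct::experimental::nd_range_barrier(item, counter);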
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
/// Returns the number of logical-group in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
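// Example (a minimal sketch, inside a kernel): split each work-group into
// logical tiles of 16 work-items; `item` is the current sycl::nd_item<3>.
//
//   dpct::experimental::logical_group tile(item, item.get_group(), 16);
//   uint32_t lane    = tile.get_local_linear_id(); // id within the tile
//   uint32_t tile_id = tile.get_group_linear_id(); // tile id in the group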
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation, it computes the max active
/// work-group number per Xe-Core. Ref to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Share local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether barrier is used.
/// \param [in] used_large_grf Whether large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size will
/// be used instead of \p wg_size and returns -1.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf)
num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
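// Example (a minimal sketch): query the occupancy of a 256-work-item
// work-group that uses 8 KB of shared local memory and a barrier.
//
//   int num_wg = 0;
//   dpct::experimental::calculate_max_active_wg_per_xecore(
//       &num_wg, /*wg_size=*/256, /*slm_size=*/8 * 1024,
//       /*sg_size=*/32, /*used_barrier=*/true);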
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ?
&get_default_queue()
: reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2 *,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params,
typename R, typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type = std::tuple_element_t<account_for_default_params<i>(),
std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i-1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra)
return nullptr;
for (; (std::size_t) *extra != 0; ++extra) {
if ((std::size_t) *extra == 1) {
return static_cast<char*>(*(extra+1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
  /// \param [in] kernel_params Array of pointers to arguments,
  /// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params),
args_buffer(get_args_buffer(extra))
{}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i>*>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>());
}
}
};
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8bits/16bits
/// channel width will be 32 bits. sycl::half is an exception.
template <class T> struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t>
: public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>>
: public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T> struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T> struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>>
: public fetch_data<sycl::vec<T, 4>> {};
template <class T> struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create image wrapper with given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims);
/// Create image wrapper with given data type \p T, channel number and dims
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims);
} // namespace detail
/// Image channel info, include channel number, order, data width and type
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T> static image_channel create() {
image_channel channel;
channel.set_channel_size(detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) *
8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
  /// \param g Channel g width in bits. Should be the same as \p r, or zero.
  /// \param b Channel b width in bits. Should be the same as \p g, or zero.
  /// \param a Channel a width in bits. Should be the same as \p b, or zero.
  /// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Channels number to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num,
unsigned channel_size) {
if (in_channel_num < _channel_num)
return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
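// Example (a minimal sketch): two equivalent ways to describe a 4-channel
// 32-bit floating-point image channel.
//
//   auto c1 = dpct::image_channel::create<sycl::float4>();
//   dpct::image_channel c2(32, 32, 32, 32, dpct::image_channel_data_type::fp);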
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions> void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i)
_range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions> sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data)
std::free(_host_data);
_host_data = nullptr;
}
};
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) {
return _channel.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
size_t _x, _y, _pitch;
image_channel _channel;
};
/// Image sampling info, include addressing mode, filtering mode and
/// normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode =
sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; }
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; }
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
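// Example (a minimal sketch): configure clamped, linearly filtered,
// normalized-coordinate sampling.
//
//   dpct::sampling_info info;
//   info.set(sycl::addressing_mode::clamp_to_edge,
//            sycl::filtering_mode::linear,
//            sycl::coordinate_normalization_mode::normalized);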
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) {
_sampling_info = info;
}
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void
set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) {
return _data.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray> class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) {
if (!_image)
create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image)
delete _image;
_image = nullptr;
}
};
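// Example (a minimal sketch; `data`, `w`, `h`, `pitch` and queue `q` are
// hypothetical): wrap pitched 2D float data and sample it in a kernel.
//
//   dpct::image_wrapper<float, 2> img;
//   img.attach(data, w, h, pitch);
//   q.submit([&](sycl::handler &cgh) {
//     dpct::image_accessor_ext<float, 2> tex(img.get_sampler(),
//                                            img.get_access(cgh, q));
//     cgh.parallel_for(sycl::range<2>(h, w), [=](sycl::item<2> it) {
//       float v = tex.read((float)it[1], (float)it[0]);
//       /* ... use v ... */
//     });
//   });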
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value
&&std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x, int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
};
/// Create image wrapper according to image data and sampling info.
/// \param data Image data used to create image wrapper.
/// \param info Image sampling info used to create image wrapper.
/// \returns Pointer to the base class of the created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
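/// Illustrative note on the dispatch above: a pitched 2D image of fp32 RGBA
/// data resolves to dims == 2 and a 4-channel fp32 image_channel, so the
/// detail helpers below ultimately return a new image_wrapper<sycl::float4, 2>
/// carrying the given sampling info and data. A minimal hedged sketch (the
/// image_data and sampling_info setup is assumed to happen elsewhere):
/// \code
/// dpct::image_data data;    // e.g. a pitched 2D fp32 RGBA payload
/// dpct::sampling_info info; // addressing/filtering/coordinate modes
/// dpct::image_wrapper_base *w = dpct::create_image_wrapper(data, info);
/// \endcode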
namespace detail {
/// Create an image wrapper with the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create an image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <unordered_set>
#include <windows.h>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <random>
#include <fstream>
#include "image.hpp"
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info
get_kernel_function_info(const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to a temporary file and return the absolute path to it.
/// The temporary file is created inside a temporary directory; both have
/// random names, and only the user has access permissions. Only one
/// temporary file will be created in the temporary directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec)
throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts)
throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec)
throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec)
throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good())
throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec)
throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof())
break;
if (c != data[cnt++])
mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
// Analyze PE-header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr)
throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
// Unlike a Windows DLL, the library file can be removed while it is loaded,
// so delete the temporary file and directory immediately.
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load kernel library whose image is already in memory and return a handle to
/// use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function
get_kernel_function(kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
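/// Illustrative end-to-end sketch of the three entry points above. The
/// library file name, kernel name, queue `q`, and argument pointers are
/// assumptions of this example.
/// \code
/// dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.bin");
/// dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");
/// void *args[] = {&d_a, &d_b, &d_c, &n};
/// dpct::invoke_kernel_function(fn, q, sycl::range<3>(1, 1, grid),
///                              sycl::range<3>(1, 1, block),
///                              /*localMemSize=*/0, args, /*extra=*/nullptr);
/// dpct::unload_kernel_library(lib);
/// \endcode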
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p
get_image_wrapper(dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
template <class... Args> class dpct_kernel_name;
template <int Arg> class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct{
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
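// Illustrative usage of DPCT_CHECK_ERROR (the queue and pointers are
// hypothetical): the macro evaluates the expression and maps any thrown
// exception to dpct::default_error after printing it.
//
//   dpct::error_code st = DPCT_CHECK_ERROR(q.memcpy(dst, src, bytes).wait());
//   if (st != dpct::success) { /* expression threw; error already printed */ }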
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <unordered_map>
#include <algorithm>
#include <list>
#include "memory.hpp"
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
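/// For example, oneDNN version 3.3.4 is reported as 3 * 1000 + 3 * 100 + 4 = 3304.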
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t
to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
/// \param [in] t Sequence length.
/// \param [in] n Batch size.
/// \param [in] c Channel size of the input.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
/// \param [in] strides Array of dimension ndims that contain the stride of
/// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
/// \param [out] t Sequence length.
/// \param [out] n Batch size.
/// \param [out] c Channel size of the input.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from a ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from a ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const {
return _desc.get_strides();
}
/// Getting element num from a ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const {
return bool(_desc);
}
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
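/// Illustrative sketch (shape values assumed; dpct::library_data_t::real_float
/// comes from lib_common_utils.hpp): describe a 2x3x8x8 NCHW fp32 tensor and
/// query its footprint.
/// \code
/// dpct::dnnl::memory_desc_ext md;
/// md.set(dpct::dnnl::memory_format_tag::nchw,
///        dpct::library_data_t::real_float, /*n=*/2, /*c=*/3, /*h=*/8, /*w=*/8);
/// size_t elems = md.get_element_num(); // 2 * 3 * 8 * 8 = 384
/// size_t bytes = md.get_size();        // 384 * sizeof(float)
/// \endcode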
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter.
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if(alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
/// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if(_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \param [out] alg Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
/// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \param [out] local_size Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \param [out] k Value of k parameter.
float get_k() const { return _k; }
};
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting a ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [in] padding Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from a ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \param [out] alg Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension ndims that contains the size of
/// each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
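/// Worked example of the output-dimension formula above (values assumed):
/// 2x2 max-pooling with stride 2 and no padding on a 32x32 input gives
/// out_h = 1 + (32 + 2*0 - 2) / 2 = 16, and likewise out_w = 16.
/// \code
/// dpct::dnnl::pooling_desc pd;
/// pd.set(::dnnl::algorithm::pooling_max, /*kernel_h=*/2, /*kernel_w=*/2,
///        /*padding_h=*/0, /*padding_w=*/0, /*stride_h=*/2, /*stride_w=*/2);
/// int n, c, h, w;
/// pd.get_forward_output_dim(src_md, &n, &c, &h, &w); // h == w == 16
/// \endcode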
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
/// Getting floating point math mode specified in the given convolution descriptor.
/// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate.
/// \param [in] dilate_w Value of width of dilate.
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting a ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
/// \param [in] paddings Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [in] dilates Array of dimension ndims containing the dilation size of
/// each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from a ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
/// \param [out] ndims Dimension of the convolution operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [out] dilates Array of dimension ndims containing the dilate size of
/// each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
/// Getting the dilate parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the dilate size of each
/// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
*out_h = 1 + (dims[2] + 2 * _paddings[0] -
(1 + (_dilates[0] * (weight_dims[2] - 1)))) /
_strides[0];
*out_w = 1 + (dims[3] + 2 * _paddings[1] -
(1 + (_dilates[1] * (weight_dims[3] - 1)))) /
_strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [in] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension ndims that contains the size of
/// each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = weight_dims[0]; // output channel count, consistent with the 2D variant above
for (int i = 2; i < ndims; i++) {
out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
(1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) /
_strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0
&& _dilates.size() == 0
&& _paddings.size() == 0);
}
};
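/// Illustrative sketch (values assumed): a dense 3x3 convolution with
/// padding 1 and stride 1. Note that set() converts the one-based dilation
/// it receives to oneDNN's zero-based convention, so dilate == 1 (no
/// dilation) is stored internally as 0.
/// \code
/// dpct::dnnl::convolution_desc cd;
/// cd.set(/*padding_h=*/1, /*padding_w=*/1, /*stride_h=*/1, /*stride_w=*/1,
///        /*dilate_h=*/1, /*dilate_w=*/1);
/// cd.set_group_count(1);
/// \endcode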
/// An enum class representing rnn mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing rnn bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing rnn direction.
enum class rnn_direction {unidirectional, bidirectional};
/// A class holding description for a RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
/// A class holding description for a Dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const {
return bool(_imp);
}
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init(){
_imp = std::make_shared<dropout_desc_imp>();
}
/// Setting a dropout descriptor with given parameters.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
/// Getting parameters from a dropout descriptor.
/// \param [out] p Probability of value set to zero.
/// \param [out] states Memory that stores the random generator state.
/// \param [out] seed Seed used to initialize conditions of the generator state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
/// Getting the probability of value set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
/// Restoring a dropout descriptor from stored state.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
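/// Illustrative sketch of the dropout descriptor lifecycle (the engine, the
/// state buffer, and its size are assumed to come from surrounding code):
/// \code
/// dpct::dnnl::dropout_desc dd;
/// dd.init();
/// dd.set(engine, /*p=*/0.5f, state_ptr, state_size, /*seed=*/1234ULL);
/// float p = dd.get_probability(); // 0.5f
/// \endcode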
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses LRU replacement policy, and the default cache
// capacity is 1024.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc
compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc
transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc,
void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc
transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
create_primitive_desc(args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool
scale_parameter_preprocess(const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event
execute_primitive(const std::pair<detail::primitive_cache_key_type,
primitive_type *> &primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T> struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const {
return bool(_eng) && bool(_s) && bool(_q);
}
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
/// Creating the oneDNN engine and binding it to the current device's
/// default queue.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
/// Setting the user's SYCL queue for a oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
throw std::runtime_error(
"set_queue: queue is mismatch with current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
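// A minimal setup sketch (illustration only; `my_queue` is a hypothetical
// user-owned queue that must share the engine's SYCL context):
//
//   engine_ext engine;
//   engine.create_engine();
//   sycl::queue my_queue(engine.get_queue()->get_context(),
//                        engine.get_queue()->get_device());
//   engine.set_queue(&my_queue);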
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from one memory to another memory with a
/// different description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
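// Sketch of the alpha/beta convention shared by the routines above, stated
// as a reading aid rather than an API guarantee:
// dst = alpha * op(src) + beta * dst_prior. For example, assuming float
// device buffers `src` and `dst` that both match descriptor `desc`:
//
//   engine.sum(/*alpha=*/2.f, desc, src, /*beta=*/1.f, desc, dst);
//   // each dst element now holds 2 * src[i] + the old dst[i]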
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
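// A hedged usage sketch: `adesc` is an activation_desc assumed to be
// configured elsewhere (its setters are outside this excerpt), and `src`,
// `dst` are device buffers matching their descriptors:
//
//   engine.activation_forward(adesc, /*alpha=*/1.f, src_desc, src,
//                             /*beta=*/0.f, dst_desc, dst);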
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void
activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
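// A short sketch of the asynchronous fill: the value is passed by pointer
// and must have the element type described by `src_desc`:
//
//   float zero = 0.f;
//   sycl::event e = engine.async_fill(src_desc, src, &zero);
//   e.wait();  // or chain the event into later submissions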
/// Copying the scaled data from one memory to another memory with a
/// different description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Perform specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta, const memory_desc_ext &dst_desc,
void *dst);
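// A hedged sketch of an element-wise add; `binary_op::add` is an assumed
// enumerator name used for illustration only:
//
//   engine.async_binary(binary_op::add, /*alpha_0=*/1.f, desc, src_0,
//                       /*alpha_1=*/1.f, desc, src_1,
//                       /*beta=*/0.f, desc, dst);  // dst = src_0 + src_1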
/// Perform specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event
async_activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc,
memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Get the size of the workspace needed by batch normalization. The data
/// stored in the workspace must be preserved between forward and backward.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
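// A hedged sketch of the usual preparation steps: derive the parameter
// descriptor, then query and allocate the workspace. The enumerators
// `batch_normalization_mode::spatial` and `batch_normalization_ops::none`
// are assumed names used for illustration:
//
//   memory_desc_ext sbmv_desc;
//   engine_ext::derive_batch_normalization_memory_desc(
//       sbmv_desc, src_desc, batch_normalization_mode::spatial);
//   size_t ws_size = engine.get_batch_normalization_workspace_size(
//       batch_normalization_ops::none, src_desc);
//   void *workspace = sycl::malloc_device(ws_size, *engine.get_queue());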
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
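// A hedged call sketch for the overload above; all pointers are assumed to
// be device allocations matching their descriptors, and `factor` acts as
// the running-statistics momentum:
//
//   engine.async_batch_normalization_forward_training(
//       batch_normalization_mode::spatial, /*epsilon=*/1e-5f,
//       /*factor=*/0.1f, /*alpha=*/1.f, src_desc, src, /*beta=*/0.f,
//       dst_desc, dst, sbmv_desc, scale, bias,
//       running_mean, running_var, saved_mean, saved_var);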
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [in] diff_scale Pointer to differential scale data.
/// \param [in] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
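// A hedged usage sketch: `cdesc` is a convolution_desc assumed to be
// configured elsewhere; ::dnnl::algorithm::convolution_direct is a
// standard oneDNN algorithm enumerator:
//
//   engine.async_convolution_forward(
//       cdesc, ::dnnl::algorithm::convolution_direct, /*alpha=*/1.f,
//       src_desc, src, weight_desc, weight, /*beta=*/0.f, dst_desc, dst);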
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Value to scaling factors used to scale the summand
/// value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential weight memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential bias memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(float alpha,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_bias_desc,
void *diff_bias);
/// Getting the required weight space size for the specified RNN operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for the specified RNN operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size, size_t *workspace_size);
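// A hedged sketch of the query-then-allocate pattern these two routines
// support; `rdesc` is an rnn_desc assumed to be configured elsewhere:
//
//   size_t weight_size = 0, scratchpad_size = 0, workspace_size = 0;
//   engine.rnn_get_weight_space_size(rdesc, &weight_size);
//   engine.rnn_get_scratchpad_workspace_size(
//       rdesc, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad_size, &workspace_size);
//   sycl::queue *q = engine.get_queue();
//   void *weight     = sycl::malloc_device(weight_size, *q);
//   void *scratchpad = sycl::malloc_device(scratchpad_size, *q);
//   void *workspace  = sycl::malloc_device(workspace_size, *q);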
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter,
void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for the specified dropout operation.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
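// A hedged round-trip sketch: the workspace written by the forward call
// must be handed back unchanged to the backward call. `ddesc` is a
// dropout_desc assumed to be initialized via set() elsewhere:
//
//   size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
//   void *ws = sycl::malloc_device(ws_size, *engine.get_queue());
//   engine.async_dropout_forward(ddesc, src_desc, src, dst_desc, dst,
//                                ws, ws_size);
//   engine.async_dropout_backward(ddesc, dst_desc, diff_dst,
//                                 src_desc, diff_src, ws, ws_size);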
};
inline
void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
size_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("restore: state_size is less than the required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait();
_imp->_rng_engine =
oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline
void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
size_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("set: state_size is less than the required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait();
}
#endif
}
inline
::dnnl::memory::data_type
memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline
dpct::library_data_t
memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt,
unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error("to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline
::dnnl::memory::format_tag
memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n,
int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
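// A hedged usage sketch for the setters in this group: describe a dense
// float NCHW tensor of shape 1x3x224x224.
//
//   memory_desc_ext src_desc;
//   src_desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
//                /*n=*/1, /*c=*/3, /*h=*/224, /*w=*/224);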
inline
void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w,
int n_stride, int c_stride, int h_stride,
int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt,
int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if(tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
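/// The get() overloads below invert set(): they report the logical dimensions
/// and strides of the descriptor. For blocked layouts the raw oneDNN strides
/// are divided by the inner block size, and the format tag is deduced from
/// the layout (nhwc when the channel stride is 1, nchw_blocked when inner
/// blocks are present, nchw otherwise).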
inline
void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h,
int *w, int *n_stride, int *c_stride, int *h_stride,
int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag,
int *n, int *c, int *h, int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag,
int *t, int *n, int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] =
astrides[index] / block_size;
}
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 &&
adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
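/// Derives the oneDNN RNN configuration from a (seq, batch, channel) memory
/// descriptor: the format tag (tnc vs. ntc) is deduced from the strides,
/// direction_num is 2 for bidirectional RNNs, and gate_num is 4 for LSTM,
/// 3 for GRU and 1 otherwise. projection_size is reset to 0 when it equals
/// hidden_size (i.e. there is no projection layer); output_size is the
/// projection size when a projection is present, hidden_size otherwise.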
inline
void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
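/// Allocates count * data_desc.get_size() bytes of USM device memory on the
/// engine's queue; callers release the buffer with sycl::free on that same
/// queue (usually via the asynchronous cache-freeing helpers).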
inline
void *engine_ext::allocate(const memory_desc_ext &data_desc, int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
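/// Element-type dispatch for transform_no_zero_with_type, which copies src to
/// dst while filtering out zero values; used by reduction_op::mul_no_zeros so
/// that zeros do not annihilate the product.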
inline
void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
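/// Builds the oneDNN weight descriptor for a grouped convolution: the output
/// channel dimension is split into {group_count, oc / group_count} and a
/// goihw/gohwi (or goidhw/godhwi for 5-D weights) format tag is chosen
/// depending on whether the source layout is channel-last.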
inline
::dnnl::memory::desc
engine_ext::get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
throw std::runtime_error("get_group_weight_desc: group convolution with "
"blocked weight memory unimplemented.");
}
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
for (int index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
} else if (ndims == 5) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::godhwi;
} else {
tag = ::dnnl::memory::format_tag::goidhw;
}
}
help_weight_desc =
::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
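/// Folds every spatial dimension into the channel dimension
/// (c' = c * h * w * ...), preserving the batch dimension and any nChw4c /
/// nChw32c blocking. This lowers per-activation batch normalization onto
/// oneDNN's per-channel normalization primitives.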
inline
::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
assert(ndims >= 4 && "ndims is at least 4.");
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
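/// Returns the 1-D descriptor used for batch-norm scale/bias/mean/var
/// tensors: the channel count is dims[1] in spatial mode and the product of
/// all non-batch dimensions in per-activation mode.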
inline
::dnnl::memory::desc
engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
assert(ndims >= 4 && "ndims is at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
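/// Returns a channel-major (nchw / ncdhw) view of desc with identical
/// dimensions and data type; blocked descriptors are returned unchanged.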
inline
::dnnl::memory::desc engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, the destination (dst = alpha * out +
/// beta * prior_dst) is left unchanged, and this function returning true
/// means the operation can exit directly.
inline
bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
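/// Derives the descriptor for batch-norm scale/bias/mean/variance tensors
/// from the source descriptor: the batch dimension (and, in spatial mode,
/// every spatial dimension) is collapsed to 1, f16 data is promoted to f32,
/// and the source's blocking, if any, is preserved.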
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
    throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d "
                             "and 5d memory descriptors are supported.");
}
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
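/// Executes a cached primitive. Every output with beta != 0 is first written
/// to a freshly allocated scratch buffer and then blended into the user
/// buffer as dst = alpha * out + beta * dst via async_sum; outputs with
/// beta == 0 are written in place, followed by an alpha scaling when
/// alpha != 1. Scratch buffers and any caller-supplied device pointers are
/// freed asynchronously once the returned event completes.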
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
std::vector<void *> caches;
int output_arg_num = output_args.size();
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta, output_args[i]._desc,
output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
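/// Reorders desc into channel-major form. When the layout changes (or src is
/// null) a scratch buffer is allocated into *cache and registered in caches;
/// input operands with data are additionally reordered into that buffer.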
inline
::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
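/// Backward batch normalization. Operands that are not channel-major are
/// first reordered into scratch buffers; in per_activation mode the spatial
/// dimensions are additionally folded into the channel dimension. When
/// saved_mean/saved_var are not supplied, the statistics are recomputed with
/// an internal forward-training pass before the backward primitive runs.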
inline
sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias,
&reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive =
create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift, forward_primitive);
void *dst_cache = nullptr;
if (!saved_mean && !saved_var) {
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
    if (!bias) {
      // bias was null above, so reordered_bias may never have been allocated.
      if (!reordered_bias) {
        reordered_bias = allocate(diff_scale_bias_desc);
        caches.push_back(reordered_bias);
      }
      _q->memset(reordered_bias, 0, diff_scale_bias_desc.get_size());
    }
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
      {DNNL_ARG_DIFF_DST,
       {::dnnl::memory(help_diff_dst_desc, _eng,
                       reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f,
diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean,
&reordered_saved_mean, caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(is_infer,
help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive =
create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var
: saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var,
1.f, mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
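/// Shared implementation behind RNN forward inference/training and the
/// weight/workspace/scratchpad size queries. When is_get_execution_args is
/// true, no layer caches are allocated and the call only accumulates the
/// sizes reported through the *_query out-parameters. Multi-layer
/// bidirectional RNNs are split into two primitive executions (see the
/// comment below), with two layer caches ping-ponged between iterations.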
inline
sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter, dst_iter,
src_iter_c, dst_iter_c, weight, workspace,
scratchpad};
std::vector<int> offset(6, 0);
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
    // Combine the oneDNN bidirectional_sum and bidirectional_concat
    // configurations: the first layer_size - 1 layers run with
    // bidirectional_sum and the final layer with bidirectional_concat,
    // so execute_rnn_forward_primitive is called twice.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
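/// Backward counterpart of execute_rnn_forward_primitive. Note that the
/// insert_args lambda here advances the offset before constructing the
/// memory object, so arguments are read walking backwards from the end of
/// the packed buffers that the forward pass produced.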
inline
sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
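// The macros below specialize generate_cache_key for RNN and convolution
// primitive descriptors, whose identity is not fully captured by the generic
// query set: every relevant memory descriptor and attribute is serialized
// into the string used as the primitive-cache key.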
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
  case ::dnnl::primitive::kind::batch_normalization:
    ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
    break;
  case ::dnnl::primitive::kind::reduction:
    ss << pd.get_p();
    break;
  case ::dnnl::primitive::kind::eltwise:
    ss << pd.get_alpha() << pd.get_beta();
    break;
  case ::dnnl::primitive::kind::lrn:
    ss << pd.get_k();
break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
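/// Creates, or fetches from the primitive cache, a primitive matching the
/// given primitive-descriptor arguments. The returned pair carries the cache
/// key; ownership of the primitive is handed back to the cache inside
/// execute_primitive().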
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
engine_ext::create_primitive_desc(args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
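// Synchronous wrappers: each function below forwards to its async_*
// counterpart and waits on the returned event. Illustrative usage (object
// names are placeholders):
//   handle.sum(1.f, src_desc, src, 1.f, dst_desc, dst); // dst += src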
inline
void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline
void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline
void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace).wait();
}
inline
void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline
void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline
void engine_ext::lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline
sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
  size_t mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
inline
sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
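// In-place scaling src *= alpha (returns immediately when alpha == 1): src
// is copied into a scratch buffer and an eltwise_linear(alpha, 0) primitive
// writes the scaled copy back into src; the scratch buffer is freed once the
// primitive's event completes.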
inline
sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
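// Computes dst = alpha * src + beta * dst. The primitive-cache key is built
// inline from the primitive kind, the two scales, and the source
// descriptors, since ::dnnl::sum is not covered by the generic
// generate_cache_key().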
inline sycl::event
engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
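// Intended semantics: dst = beta * dst + op(alpha_0 * src_0, alpha_1 *
// src_1). The unary ops (sqrt, neg) are special-cased through the eltwise
// path; all operands are copied into scratch buffers first so the scaling
// never clobbers caller data.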
inline
sycl::event engine_ext::async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
// Let the output = 1 - input to simulate the behavior of neg.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(
primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
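// Illustrative usage sketch (not part of this header): an elementwise sum
// out = a + b via async_binary. The engine_ext instance `ex`, the descriptor
// `desc`, and the USM pointers `a`, `b`, `out` are hypothetical and assumed
// to be set up elsewhere.
//
//   // out = 1.f * a (+) 1.f * b; beta = 0 so out is overwritten:
//   sycl::event e = ex.async_binary(binary_op::add,
//                                   1.f, desc, a,
//                                   1.f, desc, b,
//                                   0.f, desc, out);
//   e.wait();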
inline
sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
  // Lp-norm power for the reduction primitive: 2 by default (norm2), 1 for norm1.
  float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
  case reduction_op::amax: {
    // amax = max(|x|): take the absolute value first, then reduce with max.
    // Braces scope adesc so its initialization is not jumped over by the
    // following case labels.
    cache = allocate(src_desc);
    activation_desc adesc;
    adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
    async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
    onednn_algorithm = ::dnnl::algorithm::reduction_max;
    src = cache;
    break;
  }
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
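// Illustrative usage sketch (not part of this header): reducing a tensor to
// its L2 norm. Per the oneDNN reduction primitive, the reduced dimensions are
// those with size 1 in `dst_desc`; `ex`, `src_desc`, `dst_desc`, `src`, and
// `dst` are hypothetical.
//
//   // dst = 1.f * norm2(src) + 0.f * dst
//   ex.async_reduction(reduction_op::norm2, 1.f, src_desc, src,
//                      0.f, dst_desc, dst).wait();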
inline
sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
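// Illustrative usage sketch (not part of this header): a ReLU forward pass.
// activation_desc::set_algorithm is used the same way inside this file (see
// the amax reduction above); other fields of `adesc` are assumed to default,
// and `ex`, `desc`, `src`, `dst` are hypothetical.
//
//   activation_desc adesc;
//   adesc.set_algorithm(::dnnl::algorithm::eltwise_relu);
//   // dst = relu(src), since alpha = 1 and beta = 0:
//   ex.async_activation_forward(adesc, 1.f, desc, src, 0.f, desc, dst).wait();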
inline
sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc =
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc,
help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline
sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline
sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event
engine_ext::async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
if(ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
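// Illustrative sketch: the workspace is only needed when batch normalization
// is fused with a post-op, in which case it must hold one copy of the source
// tensor.
//
//   size_t ws_bytes = ex.get_batch_normalization_workspace_size(
//       batch_normalization_ops::activation, src_desc);
//   // ws_bytes == src_desc.get_size(); it would be 0 for
//   // batch_normalization_ops::none.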
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
    if (workspace_size < dst_desc.get_desc().get_size()) {
      throw std::runtime_error("async_batch_normalization_forward_training: "
                               "insufficient workspace.");
}
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc,
workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace,
beta, dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
throw std::runtime_error("async_batch_normalization_backward_ex: "
"no sufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst,
dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace,
0.f, diff_dst_desc, diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
inline
sycl::event
engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}},
input_caches);
if(origin_dst_md != optimal_dst_md){
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
inline
sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst);
}
inline
sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive =
create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline
sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta,
diff_bias_desc, diff_bias);
}
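// Illustrative note: the bias gradient is just a sum-reduction of diff_dst
// over the non-channel dimensions, so a hypothetical call reducing an
// N x C x H x W diff_dst into a 1 x C x 1 x 1 diff_bias looks like:
//
//   ex.async_convolution_backward_bias(1.f, diff_dst_desc, diff_dst,
//                                      0.f, diff_bias_desc, diff_bias);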
inline
void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true,
weight_space_size, nullptr, nullptr);
return;
}
inline
void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline
sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr,
nullptr);
}
inline
sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache
: hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
size_t engine_ext::get_dropout_state_size(){
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if(_random_engine_state_size == -1) {
if(_q){
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t
engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc,
void *src,
const memory_desc_ext &dst_desc,
void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
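  // Inverted dropout: surviving elements are scaled by 1 / (1 - p) so the
  // expected value of each element is unchanged (e.g., p = 0.25 gives a
  // scale of 4/3).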
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
inline
sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src,
void *workspace, size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/lapack_utils.hpp | //==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
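// Illustrative usage sketch (not part of this header), assuming an in-order
// queue `q`, matrix order `n` with leading dimensions `lda`/`ldb`, and
// device-accessible float arrays `a`, `b`, `w`; the scratchpad-size query is
// assumed to follow oneMKL's usual pattern.
//
//   std::int64_t ws_size = oneapi::mkl::lapack::sygvd_scratchpad_size<float>(
//       q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
//   float *scratchpad = sycl::malloc_device<float>(ws_size, q);
//   int *info = sycl::malloc_device<int>(1, q);
//   int ret = dpct::lapack::sygvd(q, 1, oneapi::mkl::job::vec,
//                                 oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
//                                 w, scratchpad, static_cast<int>(ws_size),
//                                 info);
//   // ret == 0 on success; on a LAPACK error, *info holds e.info().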
/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b,
ldb, w, (Ty *)scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
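// Illustrative usage sketch (not part of this header): factor, then solve, a
// batch of SPD systems. `a_ptrs` and `b_ptrs` are hypothetical device arrays
// of `batch` device pointers, and `info` holds `batch` ints on the device.
//
//   if (dpct::lapack::potrf_batch(q, oneapi::mkl::uplo::lower, n,
//                                 a_ptrs, lda, info, batch) == 0) {
//     dpct::lapack::potrs_batch(q, oneapi::mkl::uplo::lower, n, nrhs,
//                               a_ptrs, lda, b_ptrs, ldb, info, batch);
//   }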
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info)
dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
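// Illustrative sketch: lapack_shim instantiates the given functor template
// with the C++ type matching `a_type` and forwards the remaining arguments,
// turning oneMKL exceptions into a nonzero return plus an `info` write. A
// hypothetical dispatch to getrf_impl (defined below) would look like:
//
//   int ret = lapack_shim<getrf_impl>(q, a_type, info, "getrf",
//                                     q, m, n, a_type, a, lda, ipiv,
//                                     device_ws, device_ws_size, info);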
template <typename T> class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() {
return _ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
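// Worked example: library_data_size stores element widths in bits, so for
// library_data_t::real_double (64 bits):
//   byte_to_element_number(64, real_double) == 64 / (64 / 8) == 8 elements
//   element_number_to_byte(8, real_double)  == 8  * (64 / 8) == 64 bytes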
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
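// The mapping mirrors the LAPACK-style gesvd job characters, e.g.:
//   char2jobsvd('A') -> jobsvd::vectors    (all singular vectors)
//   char2jobsvd('S') -> jobsvd::somevec    (leading min(m,n) vectors)
//   char2jobsvd('O') -> jobsvd::vectorsina (vectors overwrite A)
//   char2jobsvd('N') -> jobsvd::novec      (no vectors)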
template <typename T> struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T> struct ElementType {
  using value_type = T;
};
template <typename T> struct ElementType<std::complex<T>> {
  using value_type = T;
};
template <typename T> struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
    auto s_data = dpct::detail::get_memory(
        reinterpret_cast<typename ElementType<T>::value_type *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T> struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct value_type_trait {
using value_type = T;
};
template <typename T> struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T> auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T> struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
// Maps a C++ element type to the corresponding library_data_t enumerator.
template <typename T> constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T> struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T> struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T> struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size,
int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
// Maps oneapi::mkl::job::novec/vec to the corresponding oneapi::mkl::compz
// value used by the evx/gvx drivers.
oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T> struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T> struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T> struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T> struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T> struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of the getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
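/// Example (a minimal sketch for illustration, not part of the original
/// header; assumes an in-order queue q already exists):
/// \code
/// std::int64_t m = 64, n = 64, lda = 64;
/// std::size_t ws_bytes = 0;
/// // Query the required device workspace size in bytes, then allocate it.
/// getrf_scratchpad_size(q, m, n, library_data_t::real_double, lda,
///                       &ws_bytes);
/// void *device_ws = sycl::malloc_device(ws_bytes, q);
/// \endcode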
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, a non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
/// Solves a system of linear equations with an LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The input matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
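/// Example (a minimal sketch for illustration, not part of the original
/// header): factor a device-resident double matrix with getrf, then solve
/// A*X = B. The USM pointers a, b, ipiv, device_ws and info, the workspace
/// size ws_bytes, and the dimensions are assumed to be set up by the caller.
/// \code
/// getrf(q, n, n, library_data_t::real_double, a, lda, ipiv, device_ws,
///       ws_bytes, info);
/// getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs,
///       library_data_t::real_double, a, lda, ipiv,
///       library_data_t::real_double, b, ldb, info);
/// q.wait();  // Make the factorization and solution visible to the host.
/// \endcode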
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
/// Computes the size of workspace memory of the geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The array of scalars that define the elementary reflectors
/// for the matrix Q in its decomposition as a product of elementary reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
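/// Example (a minimal sketch for illustration, not part of the original
/// header): QR-factor a device-resident float matrix. The USM pointers a,
/// tau (at least min(m, n) elements) and info are assumed to be allocated by
/// the caller.
/// \code
/// std::size_t ws_bytes = 0;
/// geqrf_scratchpad_size(q, m, n, library_data_t::real_float, lda, &ws_bytes);
/// void *device_ws = sycl::malloc_device(ws_bytes, q);
/// geqrf(q, m, n, library_data_t::real_float, a, lda,
///       library_data_t::real_float, tau, device_ws, ws_bytes, info);
/// \endcode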
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
/// Computes the size of workspace memory of the gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of the gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
  *device_ws_size = (int)device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition (SVD) of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten according
/// to \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
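/// Example (a minimal sketch for illustration, not part of the original
/// header): full SVD of a float matrix with 'A'/'A' job codes. The USM
/// pointers a, s, u, vt and info are assumed to be allocated by the caller.
/// \code
/// std::size_t ws_bytes = 0;
/// gesvd_scratchpad_size(q, 'A', 'A', m, n, library_data_t::real_float, lda,
///                       library_data_t::real_float, ldu,
///                       library_data_t::real_float, ldvt, &ws_bytes);
/// void *device_ws = sycl::malloc_device(ws_bytes, q);
/// gesvd(q, 'A', 'A', m, n, library_data_t::real_float, a, lda,
///       library_data_t::real_float, s, library_data_t::real_float, u, ldu,
///       library_data_t::real_float, vt, ldvt, device_ws, ws_bytes, info);
/// \endcode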
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
/// Computes the singular value decomposition (SVD) of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten according
/// to \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  int ret = detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
  return ret;
}
/// Computes the size of workspace memory of the potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
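/// Example (a minimal sketch for illustration, not part of the original
/// header): Cholesky factor-and-solve for a symmetric positive-definite
/// double matrix. The USM pointers a, b and info are assumed to be allocated
/// by the caller.
/// \code
/// std::size_t ws_bytes = 0;
/// potrf_scratchpad_size(q, oneapi::mkl::uplo::lower, n,
///                       library_data_t::real_double, lda, &ws_bytes);
/// void *device_ws = sycl::malloc_device(ws_bytes, q);
/// potrf(q, oneapi::mkl::uplo::lower, n, library_data_t::real_double, a, lda,
///       device_ws, ws_bytes, info);
/// potrs(q, oneapi::mkl::uplo::lower, n, nrhs, library_data_t::real_double,
///       a, lda, library_data_t::real_double, b, ldb, info);
/// \endcode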
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
/// Computes the size of workspace memory of the syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
/// Computes the size of workspace memory of the syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
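/// Example (a minimal sketch for illustration, not part of the original
/// header): compute the two smallest eigenvalues (indices 1..2) of a
/// symmetric float matrix with the typed overload. The USM pointers a, w and
/// info are assumed to be allocated by the caller; vl/vu are unused for
/// rangev::indices.
/// \code
/// int ws_elems = 0, found = 0;
/// syheevx_scratchpad_size<float, float>(
///     q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
///     oneapi::mkl::uplo::upper, n, lda, 0.0f, 0.0f, 1, 2, &ws_elems);
/// float *device_ws = sycl::malloc_device<float>(ws_elems, q);
/// syheevx<float, float>(
///     q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
///     oneapi::mkl::uplo::upper, n, a, lda, 0.0f, 0.0f, 1, 2, &found, w,
///     device_ws, ws_elems, info);
/// \endcode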
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of the sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int
syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
int n, int lda, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
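/// Example (a minimal sketch for illustration, not part of the original
/// header): eigenvalues of the generalized problem A*x = lambda*B*x
/// (itype == 1) inside the interval (-1, 1) for a double pencil. The USM
/// pointers a, b, w and info are assumed to be allocated by the caller.
/// \code
/// int ws_elems = 0, found = 0;
/// syhegvx_scratchpad_size<double, double>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::rangev::values,
///     oneapi::mkl::uplo::upper, n, lda, ldb, -1.0, 1.0, 0, 0, &ws_elems);
/// double *device_ws = sycl::malloc_device<double>(ws_elems, q);
/// syhegvx<double, double>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::rangev::values,
///     oneapi::mkl::uplo::upper, n, a, lda, b, ldb, -1.0, 1.0, 0, 0, &found,
///     w, device_ws, ws_elems, info);
/// \endcode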
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of the sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
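/// Example (a minimal sketch for illustration, not part of the original
/// header): all eigenvalues and eigenvectors of a generalized double
/// eigenproblem (itype == 1) with the divide-and-conquer driver. The USM
/// pointers a, b, w and info are assumed to be allocated by the caller.
/// \code
/// int ws_elems = 0;
/// syhegvd_scratchpad_size<double>(q, 1, oneapi::mkl::job::vec,
///                                 oneapi::mkl::uplo::upper, n, lda, ldb,
///                                 &ws_elems);
/// double *device_ws = sycl::malloc_device<double>(ws_elems, q);
/// syhegvd<double, double>(q, 1, oneapi::mkl::job::vec,
///                         oneapi::mkl::uplo::upper, n, a, lda, b, ldb, w,
///                         device_ws, ws_elems, info);
/// \endcode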
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of the syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
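/// Example (a minimal sketch for illustration, not part of the original
/// header): all eigenvalues and eigenvectors of a symmetric float matrix.
/// The USM pointers a, w and info are assumed to be allocated by the caller.
/// \code
/// int ws_elems = 0;
/// syheev_scratchpad_size<float>(q, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, lda, &ws_elems);
/// float *device_ws = sycl::malloc_device<float>(ws_elems, q);
/// syheev<float, float>(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
///                      n, a, lda, w, device_ws, ws_elems, info);
/// \endcode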
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of the syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide-and-conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
/// Computes the size of the workspace memory of the syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide-and-conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If a lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
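// Illustrative usage sketch (not part of the original header): the templated
// syheevd path for a double symmetric matrix. `q`, `a_dev`, `w_dev`, and
// `ws_dev` are hypothetical device pointers; note that this overload counts
// the workspace in elements of T, not in bytes:
//
//   int ws_size = 0, info = 0;
//   dpct::lapack::syheevd_scratchpad_size<double>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n, lda, &ws_size);
//   double *ws_dev = sycl::malloc_device<double>(ws_size, q);
//   dpct::lapack::syheevd(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower,
//                         n, a_dev, lda, w_dev, ws_dev, ws_size, &info);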
/// Computes the size of the workspace memory of the trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
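// Illustrative usage sketch (not part of the original header): inverting an
// upper triangular float matrix with trtri. `q`, `a_dev`, and `ws_dev` are
// hypothetical; this pair of APIs reports and consumes the workspace size in
// bytes:
//
//   std::size_t ws_bytes = 0;
//   int info = 0;
//   dpct::lapack::trtri_scratchpad_size(q, oneapi::mkl::uplo::upper,
//                                       oneapi::mkl::diag::nonunit, n,
//                                       dpct::library_data_t::real_float, lda,
//                                       &ws_bytes);
//   void *ws_dev = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::upper,
//                       oneapi::mkl::diag::nonunit, n,
//                       dpct::library_data_t::real_float, a_dev, lda, ws_dev,
//                       ws_bytes, &info);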
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <optional>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or
/// backward.
enum fft_direction : int {
forward = 0,
backward
};
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
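  // Illustrative usage sketch (not part of the original header): committing a
  // 1-D single-precision complex-to-complex plan and running a forward
  // transform with the compute member defined below. `q_ptr`, `in_dev`, and
  // `out_dev` are hypothetical; each data pointer must hold batch * n1
  // sycl::float2 elements:
  //
  //   dpct::fft::fft_engine engine;
  //   engine.commit(q_ptr, n1,
  //                 dpct::fft::fft_type::complex_float_to_complex_float,
  //                 batch, nullptr);
  //   engine.compute(in_dev, out_dev, dpct::fft::fft_direction::forward);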
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement
  /// needs to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and
  /// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
  /// Create the class for calculating 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
  /// Create the class for calculating 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type,
int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
  /// Create the class for calculating FFT without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
  /// Destroy the class for calculating FFT.
  /// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
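  // Illustrative usage sketch (not part of the original header): the
  // pointer-based create/destroy lifecycle mirrors a cuFFT-style plan API.
  // `q_ptr`, `in_dev`, and `out_dev` are hypothetical:
  //
  //   auto *plan = dpct::fft::fft_engine::create(
  //       q_ptr, n1, dpct::fft::fft_type::real_float_to_complex_float, batch);
  //   plan->compute(in_dev, out_dev, dpct::fft::fft_direction::forward);
  //   dpct::fft::fft_engine::destroy(plan);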
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
  /// \p direction_and_placement needs to be specified explicitly to get the
  /// correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT
  /// direction and placement info. If it is not set, forward direction (if
  /// the current FFT is complex-to-complex) and out-of-place (false) are set
  /// by default.
static void
estimate_size(int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride,
long long odist, fft_type type, long long batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
  /// \p direction_and_placement needs to be specified explicitly to get the
  /// correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT
  /// direction and placement info. If it is not set, forward direction (if
  /// the current FFT is complex-to-complex) and out-of-place (false) are set
  /// by default.
static void
estimate_size(int dim, int *n, int *inembed, int istride, int idist,
int *onembed, int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
  /// \p direction_and_placement needs to be specified explicitly to get the
  /// correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If it is not set, forward direction (if the current
  /// FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n1, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
  /// \p direction_and_placement needs to be specified explicitly to get the
  /// correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT
  /// direction and placement info. If it is not set, forward direction (if
  /// the current FFT is complex-to-complex) and out-of-place (false) are set
  /// by default.
static void
estimate_size(int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
  /// \p direction_and_placement needs to be specified explicitly to get the
  /// correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT
  /// direction and placement info. If it is not set, forward direction (if
  /// the current FFT is complex-to-complex) and out-of-place (false) are set
  /// by default.
static void
estimate_size(int n3, int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
#endif
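  // Illustrative usage sketch (not part of the original header; requires
  // __INTEL_MKL__): estimating the workspace size before any plan exists.
  // Since the estimate depends on direction and placement, they are passed
  // explicitly here. `n1` and `batch` are hypothetical:
  //
  //   size_t est_bytes = 0;
  //   dpct::fft::fft_engine::estimate_size(
  //       n1, dpct::fft::fft_type::complex_float_to_complex_float, batch,
  //       &est_bytes,
  //       std::make_pair(dpct::fft::fft_direction::forward, false));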
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
  /// Set the user's SYCL queue for calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
  /// Set whether to use an external or the internal workspace.
  /// \param [in] flag True means using the internal workspace. False means
  /// using an external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
#endif
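  // Illustrative usage sketch (not part of the original header; requires
  // __INTEL_MKL__): opting out of the internal workspace before commit, then
  // supplying one sized from the scratchpad size that commit reports. `q_ptr`
  // and the plan parameters are hypothetical:
  //
  //   dpct::fft::fft_engine engine;
  //   engine.use_internal_workspace(false);
  //   size_t ws_bytes = 0;
  //   engine.commit(q_ptr, n1,
  //                 dpct::fft::fft_type::complex_float_to_complex_float,
  //                 batch, &ws_bytes);
  //   void *ws = sycl::malloc_device(ws_bytes, *q_ptr);
  //   engine.set_workspace(ws);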
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
private:
static std::pair<library_data_t, library_data_t>
fft_type_to_data_type(fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
}
}
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
    if (_q->get_device().is_gpu()) {                                          \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
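  // Note on the constants above: an n-point real FFT has n/2 + 1 independent
  // complex outputs (conjugate-even symmetry), so the backward distance is
  // the product of the outer dimensions times n_last / 2 + 1. In the in-place
  // case the real data is padded to 2 * (n_last / 2 + 1) reals per innermost
  // row so the complex result fits in the same buffer, which is where the
  // 2 * (n / 2 + 1) strides and forward distances come from.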
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The complex domain descriptor needs different config values if the
// FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The real domain descriptor needs different config values if the
      // FFT placement changes.
      // Check the condition here; if needed, set the new config values and
      // re-commit the descriptor.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
template <typename T> inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
}
enum class version_field : int {
major,
minor,
update,
patch
};
/// Returns the requested field of Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
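// Usage sketch (illustrative; only meaningful when compiled against Intel
// oneMKL, i.e. when __INTEL_MKL__ is defined):
//
//   int major = 0, minor = 0;
//   dpct::mkl_get_version(dpct::version_field::major, &major);
//   dpct::mkl_get_version(dpct::version_field::minor, &minor);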
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
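// Worked example (an illustrative sketch): each library_data_t value occupies
// one byte of the returned key, with the first argument in the lowest byte.
// For example, with real_int32 == 20 and real_float == 0:
//   get_type_combination_id(library_data_t::real_int32,
//                           library_data_t::real_float)
//     == ((std::uint64_t)0 << 8) | 20 == 20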
// Size of each library_data_t element, in bits.
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
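// Usage sketch (illustrative): configuring a matrix_info object for a lower
// triangular matrix with a unit diagonal and zero-based indexing.
//
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
//   info->set_uplo(oneapi::mkl::uplo::lower);
//   info->set_diag(oneapi::mkl::diag::unit);
//   info->set_index_base(oneapi::mkl::index_base::zero);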
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle,
data_x, beta_value, data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(), *sparse_matrix_handle);
oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(),
alpha_value, *sparse_matrix_handle, data_x,
beta_value, data_y);
break;
}
default:
throw std::runtime_error(
"the spmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
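// Usage sketch (illustrative): y = 1.0 * A * x + 0.0 * y for the 2x2 CSR
// matrix [[1, 2], [0, 3]]. The arrays are shown as host data for brevity; in
// the USM mode they must be device-accessible allocations and the queue must
// be in-order.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   float val[] = {1.f, 2.f, 3.f};  // non-zero values
//   int row_ptr[] = {0, 2, 3};      // length num_rows + 1
//   int col_ind[] = {0, 1, 1};      // column index of each non-zero
//   float x[] = {1.f, 1.f}, y[2] = {};
//   float alpha = 1.f, beta = 0.f;
//   dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, 2, 2, &alpha,
//                       info, val, row_ptr, col_ind, x, &beta, y);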
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Stores the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows of the sparse matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr)
return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
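// Usage sketch (illustrative): analyzing a lower triangular CSR matrix so
// that later solves can reuse the optimization. `q`, `n`, `val`, `row_ptr`
// and `col_ind` are hypothetical, device-accessible inputs.
//
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
//   info->set_uplo(oneapi::mkl::uplo::lower);
//   auto opt = std::make_shared<dpct::sparse::optimize_info>();
//   dpct::sparse::optimize_csrsv(q, oneapi::mkl::transpose::nontrans, n,
//                                info, val, row_ptr, col_ind, opt);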
#endif
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim),
_value(value), _value_type(value_type), _layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix
class sparse_matrix_desc {
public:
/// Constructor
/// \param [out] desc The descriptor to be created
/// \param [in] row_num Number of rows of the sparse matrix.
  /// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr_type Data type of the \p row_ptr .
/// \param [in] col_ind_type Data type of the \p col_ind .
/// \param [in] base Indicates how input arrays are indexed.
/// \param [in] value_type Data type of the \p value .
/// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr),
_col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type), _base(base), _value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
  /// Add dependency for the destructor.
  /// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] value An array containing the non-zero elements of the sparse matrix.
/// \param [out] row_ptr_type Data type of the \p row_ptr .
/// \param [out] col_ind_type Data type of the \p col_ind .
/// \param [out] base Indicates how input arrays are indexed.
/// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
/// \param [out] format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [out] attribute The attribute type
/// \param [out] data The attribute value
/// \param [out] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t> void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies operation on input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a, \p x and \p y.
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
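// Usage sketch (illustrative): y = alpha * A * x + beta * y in double
// precision, where `q`, `n`, `nnz`, `row_ptr`, `col_ind`, `val`, `x` and `y`
// are hypothetical, device-accessible inputs for an n x n CSR matrix.
//
//   auto a = std::make_shared<dpct::sparse::sparse_matrix_desc>(
//       n, n, nnz, row_ptr, col_ind, val, dpct::library_data_t::real_int32,
//       dpct::library_data_t::real_int32, oneapi::mkl::index_base::zero,
//       dpct::library_data_t::real_double, dpct::sparse::matrix_format::csr);
//   auto x_desc = std::make_shared<dpct::sparse::dense_vector_desc>(
//       n, x, dpct::library_data_t::real_double);
//   auto y_desc = std::make_shared<dpct::sparse::dense_vector_desc>(
//       n, y, dpct::library_data_t::real_double);
//   double alpha = 1.0, beta = 0.0;
//   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, a, x_desc,
//                      &beta, y_desc, dpct::library_data_t::real_double);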
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies operation on input matrix a.
/// \param [in] trans_b Specifies operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a, \p b and \p c.
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a, b,
beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
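// Usage sketch (illustrative): attaching the handler to a user-created queue
// so asynchronous SYCL errors are reported instead of silently dropped.
//
//   sycl::queue q{sycl::default_selector_v, dpct::exception_handler};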
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy the memory pointed to by \p event.
///
/// \param event Pointer to the sycl::event address.
static void destroy_event(event_ptr event) {
delete event;
}
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
  /// Returns the maximum clock rate of the device's global memory in kHz. If
  /// the compiler does not support this API, the default value of 3200000 kHz
  /// is returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
  /// Returns the maximum bus width between the device and memory in bits. If
  /// the compiler does not support this API, the default value of 64 bits is
  /// returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char* name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void
set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void
set_max_register_size_per_work_group(int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) {
_device_id = device_id;
}
void set_uuid(std::array<unsigned char, 16> uuid) {
_uuid = std::move(uuid);
}
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
  /// Return the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
/// Get the number of bytes of free and total memory on the SYCL device.
/// \param [out] free_memory The number of bytes of free memory on the SYCL device.
/// \param [out] total_memory The number of bytes of total memory on the SYCL device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(
this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(
get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0)
prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(
this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message("get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning "get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
    // Estimated max register size per work group; update the value according
    // to device properties if needed.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(
_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
    // Guard the destruction of current_queues to make sure the ref count is safe.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue* q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
/// Caller should acquire resource \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i]))
break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.')
break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
    unsigned int dev_id = current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
    auto it = _thread2dev_map.find(get_tid());
    if (it != _thread2dev_map.end())
      return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
    _thread2dev_map[get_tid()] = id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for(auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device =
sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu())
_cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
  /// thread id in _thread2dev_map, which means the default device should be
  /// used for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
/// Util function to get the default queue of the currently selected device,
/// depending on the USM config. Returns the default out-of-order queue when
/// USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// the dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in the dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of current device in
/// dpct device manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of current
/// device in dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev){
return dev_mgr::instance().get_device_id(dev);
}
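// Usage sketch (illustrative): binding device 0 to the calling thread and
// retrieving its default queue.
//
//   dpct::select_device(0);
//   sycl::queue &q = dpct::get_default_queue();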
/// Util function to check whether a device supports the given sycl::aspect
/// capabilities; throws std::runtime_error if any is not supported.
inline void
has_capability_or_fail(const sycl::device &dev,
const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it))
continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
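// Usage sketch (illustrative): guard double-precision kernels on devices that
// may lack fp64. Assumes device_ext is convertible to sycl::device, as in
// dpct.
//   sycl::device dev = dpct::get_current_device();
//   dpct::has_capability_or_fail(dev, {sycl::aspect::fp64});
//   // From here on it is safe to launch kernels that use `double`.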
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include "device.hpp"
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
  void set_x(size_t x) { _x = x; }
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
    // Reserve address space; no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
  /// Allocate a virtual device pointer of \p size bytes backed by a
  /// sycl::buffer.
void *mem_alloc(size_t size) {
if (!size)
return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
  /// Free the virtual pointer \p ptr and drop its backing allocation.
void mem_free(const void *ptr) {
if (!ptr)
return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
  /// Translate a virtual device pointer into its allocation record
  /// (buffer, alloc_ptr, size).
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
  /// Check whether the pointer is a virtual device pointer.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
  /// This padding may be set to a positive value to help debug
  /// out-of-bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and pointer points to this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
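// Illustrative note (not part of the original header): mem_mgr hands out
// "virtual" pointers carved from a reserved, inaccessible address range and
// maps each back to a backing sycl::buffer, e.g.:
//   void *p = detail::mem_mgr::instance().mem_alloc(1024); // virtual pointer
//   auto alloc = detail::mem_mgr::instance().translate_ptr(p);
//   // alloc.buffer is the backing buffer; (byte_t *)p - alloc.alloc_ptr is
//   // the byte offset of p inside it.
//   detail::mem_mgr::instance().mem_free(p);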
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <memory_region Memory, class T = byte_t> class memory_traits {
public:
static constexpr sycl::access::target target =
sycl::access::target::device;
static constexpr sycl::access_mode mode =
(Memory == constant) ? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
int value, size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
/// Set \p value to the 3D memory region pointed to by \p data in \p q.
/// \p size specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction
direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] =
{{memcpy_direction::host_to_host,
memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size)
return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Compute the linear extent (in elements) touched by a strided 3D copy, so
// that an accessor of this size covers the whole copy without exceeding it.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
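// Worked example: for size = {4, 3, 2} (x, y, z) with pitch = 8 and
// slice = pitch * 3 = 24, get_copy_range returns 24*1 + 8*2 + 4 = 44, the
// linear element count an accessor must span to cover the whole strided copy.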
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
sycl::range<3> to_range, sycl::range<3> from_range,
sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // the free operation depends on these events
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // The destination region may contain padding bytes whose contents
          // must be preserved, so first fill the temp buffer with the
          // existing device data.
std::vector<sycl::event>{
dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
device_to_host, dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(),
device_to_host, dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)), from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size,
[=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size,
[=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
size_t to_pitch, size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1),
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U> struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
};
} // namespace deprecated
inline void dpct_free(void *ptr,
const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr is a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template<class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr)
return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(
sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
/// Get the accessor for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode>
get_access(const void *ptr, sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
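// Usage sketch (illustrative): allocate and free a device buffer of 1024
// floats on the default queue.
//   float *d_data = (float *)dpct::dpct_malloc(1024 * sizeof(float));
//   dpct::dpct_free(d_data);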
/// Get the host pointer from a buffer that is mapped to the virtual pointer
/// \p ptr.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T> static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr =
BufferOffset.first.get_host_access()
.get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data
dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr,
sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed by a batch of pointers in \p pointers which
/// are related to \p q after \p events completed.
///
/// \param pointers The pointers point to the device memory requested to be freed.
/// \param events The events to be waited.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
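// Usage sketch (illustrative): a host-to-device-to-host round trip with the
// synchronous copy.
//   std::vector<float> h(256, 1.0f);
//   float *d = (float *)dpct::dpct_malloc(h.size() * sizeof(float));
//   dpct::dpct_memcpy(d, h.data(), h.size() * sizeof(float),
//                     dpct::host_to_device);
//   dpct::dpct_memcpy(h.data(), d, h.size() * sizeof(float),
//                     dpct::device_to_host);
//   dpct::dpct_free(d);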
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
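// Usage sketch (illustrative): copy a host matrix of 100 floats per row and
// 64 rows into a pitched device allocation (`h_ptr` is a hypothetical host
// pointer).
//   size_t pitch; // receives the aligned row size in bytes
//   void *d = dpct::dpct_malloc(pitch, 100 * sizeof(float), 64);
//   dpct::dpct_memcpy(d, pitch, h_ptr, 100 * sizeof(float),
//                     100 * sizeof(float), 64, dpct::host_to_device);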
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of a 3D matrix specified by \p from to
/// another 3D matrix specified by \p to. The from and to position info are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
/// Asynchronously copies a subset of a 3D matrix specified by \p from to
/// another 3D matrix specified by \p to. The from and to position info are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
sycl::id<3> from_pos, sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x
/// and \p y specify the 2D memory size to set. \p pitch is the bytes in linear
/// dimension, including padding bytes. The function will return after the
/// memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
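// Usage sketch (illustrative, with `d` and `pitch` as returned by the 2D
// dpct_malloc above): zero a pitched 100 x 64 float matrix.
//   dpct::dpct_memset(d, pitch, 0, 100 * sizeof(float), 64);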
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x
/// and \p y specify the 2D memory size to set. \p pitch is the bytes in linear
/// dimension, including padding bytes. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q.
/// \p size specifies the 3D memory size to set. The function will return after the
/// memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q.
/// \p size specifies the 3D memory size to set. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <class T, memory_region Memory> class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory> class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
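// Usage sketch (illustrative): view a flat device pointer as a 2D array
// inside a kernel (`d_ptr`, `rows`, `cols`, `r` and `c` are hypothetical).
//   dpct::accessor<float, dpct::global, 2> view(d_ptr,
//                                               sycl::range<2>(rows, cols));
//   float v = view[r][c]; // row-major indexing via operator[]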
namespace detail {
/// Device variable in the shared, global or constant address space.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(
const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false),
_host_ptr(nullptr), _device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
    // Make sure that the singleton classes mem_mgr and dev_mgr are destructed
    // later than this object.
detail::mem_mgr::instance();
dev_mgr::instance();
}
  /// Constructor from individual range dimensions
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference)
dpct::dpct_free(_device_ptr);
if (_host_ptr)
std::free(_host_ptr);
}
  /// Allocate memory with the default queue, and initialize it if an initial
  /// value was provided.
void init() {
init(dpct::get_default_queue());
}
  /// Allocate memory with the specified queue, and initialize it if an
  /// initial value was provided.
void init(sycl::queue &q) {
if (_device_ptr)
return;
if (!_size)
return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
  /// Rebind the variable to an existing device pointer \p src of \p size
  /// bytes.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
  /// Get the memory pointer of the memory object: a virtual pointer when USM
  /// is not used, and a device pointer when USM is used.
value_t *get_ptr() {
return get_ptr(get_default_queue());
}
  /// Get the memory pointer of the memory object: a virtual pointer when USM
  /// is not used, and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type
get_access(sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
: _size(size), _range(size / sizeof(T)), _reference(true),
_device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(
_size, q.get_device(), q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
} // namespace detail
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
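// Usage sketch (illustrative): a 1-D device-global array initialized from the
// host; get_ptr() allocates on first use and copies the initial values.
//   static dpct::global_memory<float, 1> coeffs(sycl::range<1>(4),
//                                               {1.f, 2.f, 3.f, 4.f});
//   float *dev_coeffs = coeffs.get_ptr();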
// dpct::deprecated:: is for functionality that was introduced for
// compatibility purposes but relies on deprecated C++ features, which have
// either been removed or will be removed in future standards.
// Direct use of functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
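// Usage sketch (illustrative): a std::vector backed by USM host memory,
// allocated on the default dpct queue.
//   std::vector<float, dpct::deprecated::usm_host_allocator<float>> v(16);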
class pointer_attributes {
public:
void init(const void *ptr,
sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type !=
sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type !=
sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device) ? ptr : nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() {
return memory_type;
}
const void *get_device_pointer() {
return device_pointer;
}
const void *get_host_pointer() {
return host_pointer;
}
bool is_memory_shared() {
return memory_type == sycl::usm::alloc::shared;
}
unsigned int get_device_id() {
return device_id;
}
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
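// Usage sketch (illustrative, USM mode only; `ptr` is a hypothetical USM
// pointer):
//   dpct::pointer_attributes attrs;
//   attrs.init(ptr);
//   if (attrs.is_memory_shared()) {
//     // `ptr` is accessible from both host and device.
//   }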
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T> bool isnan(const T a) { return sycl::isnan(a); }
// TODO: Add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i)
f += a[i] * a[i];
return sycl::sqrt(f);
}
}
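// Worked example: fast_length of {3.f, 4.f} with len = 2 returns
// sqrt(9 + 16) = 5.f.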
/// Calculate the Euclidean length (L2 norm) of the input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T> inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i)
ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
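// Worked example: an ordered compare is false when either operand is NaN,
// while the unordered variant is true.
//   dpct::compare(1.0f, NAN, std::less<>());           // false
//   dpct::unordered_compare(1.0f, NAN, std::less<>()); // true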
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
compare_both(const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs a 2-element comparison, returning per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
compare(const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element comparison; the compare result of each element is 0
/// (false) or 0xffff (true). Returns an unsigned int composed of the two
/// per-element compare results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
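// Worked example: compare_mask({1.f, 2.f}, {1.f, 3.f}, std::equal_to<>())
// packs 0xffff for the equal element and 0x0000 for the unequal one into one
// unsigned int; with element 0 in the low half (little-endian layout) the
// result is 0x0000ffff.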
/// Performs a 2-element unordered comparison, returning per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element unordered comparison; the compare result of each
/// element is 0 (false) or 0xffff (true). Returns an unsigned int composed of
/// the two per-element compare results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, per element, whether a 2-element value is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
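// Example (illustrative): mixed signed/unsigned arguments are resolved by
// converting the signed operand to unsigned, so negative values wrap:
//   dpct::min(std::int32_t(-1), std::uint32_t(1));  // == 1u, because
//   // static_cast<std::uint32_t>(-1) == 0xFFFFFFFF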
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
/// Performs ReLU saturation: negative (non-NaN) values are clamped to zero.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T> inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f)
return 0.f;
return a;
}
template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
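// Example: relu(-2.5f) == 0.f and relu(3.f) == 3.f; NaN inputs pass through
// unchanged because the isnan() guard above skips the clamp.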
/// Performs complex number multiply addition.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
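// Example: (1+2i)*(3+4i) + (5+6i) = (3-8+5) + (4+6+6)i = 0 + 16i:
//   sycl::vec<float, 2> r = dpct::complex_mul_add(
//       sycl::vec<float, 2>{1.f, 2.f}, sycl::vec<float, 2>{3.f, 4.f},
//       sycl::vec<float, 2>{5.f, 6.f});  // r == {0.f, 16.f}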
/// Compares two values and returns the larger one. If either input is NaN,
/// returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T> inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either input is NaN,
/// returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T> inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
/// A sycl::abs wrapper functor.
struct abs {
template <typename T> auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute a vectorized binary operation on two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
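// Example (illustrative): saturating add of four packed uint8 lanes; each
// byte position of `a` pairs with the same byte position of `b`:
//   unsigned r = dpct::vectorized_binary<sycl::vec<std::uint8_t, 4>>(
//       0x01FF7F80u, 0x01017F80u, dpct::add_sat());
//   // per byte: 0x01+0x01=0x02, 0xFF+0x01->0xFF (sat),
//   //           0x7F+0x7F=0xFE, 0x80+0x80->0xFF (sat)  =>  r == 0x02FFFEFFu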
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater than of the two values
template <typename S, typename T> inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T> inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the element-wise absolute differences of two values,
/// without modulo overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the element-wise absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
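// Example: sum of absolute differences (SAD) over four packed uint8 lanes:
//   unsigned s = dpct::vectorized_sum_abs_diff<sycl::vec<std::uint8_t, 4>>(
//       0x00102030u, 0x01112233u);
//   // |0x00-0x01| + |0x10-0x11| + |0x20-0x22| + |0x30-0x33| = 1+1+2+3 = 7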
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/blas_utils.hpp | //==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <utility>
#include <vector>
#include <thread>
namespace dpct {
/// Get the value of \p s.
/// Copies the data to the host synchronously, then returns it.
/// \param [in] s The pointer that points to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array)
sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
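// e.g. stride_for(10, 8) == ((10 - 1) / 8 + 1) * 8 == 16: ten elements are
// padded up to the next multiple of the 8-element alignment.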
#ifndef DPCT_USM_LEVEL_NONE
template<typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced)
_temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced)
return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x,
library_data_t x_type, int incx, const void *y,
library_data_t y_type, int incy, void *result,
library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const float *>(x), incx,
reinterpret_cast<const float *>(y), incy,
reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val,
data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val,
data_x, incx,
data_y, incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx,
data_y, incy, c_value,
s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
data_b, ldb, beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void
gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n,
int k, const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
stride_a, data_b, ldb, stride_b, beta_value,
data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k,
const T *alpha, const T *a, int lda, const T *b,
int ldb, const Tbeta *beta, T *c, int ldc) {
  // For a symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be adjusted before we call gemmt().
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a plain (non-transposing) conjugate
    // operation, but only nontrans, trans and conjtrans are available.
    // So we first materialize conjtrans(B) via omatcopy_batch, then pass
    // trans to gemmt(); composed, this yields the required conjugate of B.
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
beta_value, data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, data_b, ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void
trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const void *alpha,
const void **a, int lda, void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info), uplo_info(uplo_info),
transpose_info(transpose_info), diag_info(diag_info),
value_info(value_info), groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by their lower triangular factors (with unit diagonal
/// elements) and upper triangular factors.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, a non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size, scratchpad,
scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad,
scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
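// Example (illustrative sketch; assumes a USM-capable in-order queue q and
// caller-prepared device matrices d_a0, d_a1 -- all names hypothetical):
//   int n = 4, lda = 4, batch = 2;
//   float *a_ptrs[2] = {d_a0, d_a1};  // host array of device pointers
//   int *ipiv = sycl::malloc_device<int>(batch * n, q);
//   int *info = sycl::malloc_device<int>(batch, q);
//   dpct::getrf_batch_wrapper(q, n, a_ptrs, lda, ipiv, info, batch);
//   q.wait();  // each a_ptrs[i] now holds the LU factors of matrix i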
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb,
stride_b, batch_size, scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
}).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf,
stride_ipiv, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device,
exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalar factors of the
/// elementary reflectors.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n,
T *a[], int lda, T *tau[], int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(
q, n, x, incx, result);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
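// Example (illustrative sketch; assumes an in-order queue q and a device
// vector d_x -- hypothetical names). The result may be a plain host scalar;
// the helper copies it back synchronously when the pointer is not USM:
//   float result = 0.f;
//   dpct::nrm2(q, n, d_x, dpct::library_data_t::real_float, 1, &result,
//              dpct::library_data_t::real_float);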
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
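// For complex data, dot() computes sum(x[i] * y[i]) while dotc() conjugates
// the first vector, sum(conj(x[i]) * y[i]); for real data the two coincide.
// Sketch (d_x, d_y are hypothetical device vectors):
//   std::complex<float> result;
//   dpct::dotc(q, n, d_x, dpct::library_data_t::complex_float, 1,
//              d_y, dpct::library_data_t::complex_float, 1,
//              &result, dpct::library_data_t::complex_float);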
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
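// Example (sketch): scaling a hypothetical device float vector d_x in place
// by 2; alpha may live on the host, get_value() fetches it either way:
//   float alpha = 2.f;
//   dpct::scal(q, n, &alpha, dpct::library_data_t::real_float,
//              d_x, dpct::library_data_t::real_float, 1);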
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y, incy);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy,
const void *c, const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
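// With c = cos(theta) and s = sin(theta), rot() applies a Givens rotation to
// each pair of elements:
//   x[i] <- c * x[i] + s * y[i]
//   y[i] <- c * y[i] - s * x[i]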
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
a, lda, b, ldb, &beta_half, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
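// Example (illustrative sketch, not part of the header's API surface):
// invoking the type-dispatched gemm with single-precision data. `q_ct1`,
// `d_a`, `d_b`, and `d_c` are assumed to be a caller-provided SYCL queue and
// USM device allocations; m, n, k and the leading dimensions follow the
// column-major convention used by oneMKL.
//
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm(q_ct1, oneapi::mkl::transpose::nontrans,
//              oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//              d_a, dpct::library_data_t::real_float, lda,
//              d_b, dpct::library_data_t::real_float, ldb, &beta,
//              d_c, dpct::library_data_t::real_float, ldc,
//              dpct::library_data_t::real_float);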
/// Computes a batch of matrix-matrix products with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
b, ldb, beta, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
float>(q, a_trans, b_trans, m, n, k, &alpha_float,
a, lda, b, ldb, &beta_float, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc,
batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
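// Example (illustrative sketch): the pointer-array overload takes arrays of
// per-matrix pointers. This assumes USM device allocations `d_a`, `d_b`,
// `d_c` holding `batch` float matrices packed back to back; whether the
// pointer arrays themselves may live on the host depends on
// detail::gemm_batch_impl, so treat this as a sketch rather than a contract.
//
//   std::vector<const void *> a_ptrs(batch), b_ptrs(batch);
//   std::vector<void *> c_ptrs(batch);
//   for (int i = 0; i < batch; ++i) {
//     a_ptrs[i] = d_a + i * lda * k;
//     b_ptrs[i] = d_b + i * ldb * n;
//     c_ptrs[i] = d_c + i * ldc * n;
//   }
//   dpct::gemm_batch(q_ct1, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    a_ptrs.data(), dpct::library_data_t::real_float, lda,
//                    b_ptrs.data(), dpct::library_data_t::real_float, ldb,
//                    &beta, c_ptrs.data(), dpct::library_data_t::real_float,
//                    ldc, batch, dpct::library_data_t::real_float);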
/// Computes a batch of matrix-matrix products with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
stride_a, b, ldb, stride_b, beta, c, ldc,
stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b,
&beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
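// Example (illustrative sketch): the strided variant avoids pointer arrays
// when the matrices of a batch are packed contiguously. Same assumed
// `q_ct1`/USM setup as above, with column-major packing:
//
//   long long stride_a = static_cast<long long>(lda) * k;
//   long long stride_b = static_cast<long long>(ldb) * n;
//   long long stride_c = static_cast<long long>(ldc) * n;
//   dpct::gemm_batch(q_ct1, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    d_a, dpct::library_data_t::real_float, lda, stride_a,
//                    d_b, dpct::library_data_t::real_float, ldb, stride_b,
//                    &beta, d_c, dpct::library_data_t::real_float, ldc,
//                    stride_c, batch, dpct::library_data_t::real_float);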
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
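// Example (illustrative sketch): solving op(A_i) * X_i = alpha * B_i for a
// batch of lower-triangular float systems, overwriting each B_i with X_i.
// `a_ptrs`/`b_ptrs` are assumed arrays of per-matrix device pointers, built
// as in the gemm_batch sketch above.
//
//   float alpha = 1.0f;
//   dpct::trsm_batch(q_ct1, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//                    oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::diag::nonunit, m, n, &alpha,
//                    a_ptrs.data(), dpct::library_data_t::real_float, lda,
//                    b_ptrs.data(), dpct::library_data_t::real_float, ldb,
//                    batch, dpct::library_data_t::real_float);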
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [out] c Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
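// Example (illustrative sketch): when b != c, trmm first copies B into C and
// then multiplies in place, so C may alias B to skip the copy. `d_a`, `d_b`,
// `d_c` are assumed USM device allocations of single float matrices.
//
//   float alpha = 1.0f;
//   dpct::trmm(q_ct1, oneapi::mkl::side::left, oneapi::mkl::uplo::upper,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              m, n, &alpha, d_a, lda, d_b, ldb, d_c, ldc);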
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
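// Example (illustrative sketch): accumulating into a device counter from a
// SYCL kernel with the default relaxed ordering. `q` and the USM allocation
// `counter` (an unsigned int) are assumed to exist.
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::id<1>) {
//     dpct::atomic_fetch_add(counter, 1u);
//   });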
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value stored
/// in \p addr is equal to zero or greater than \p operand, otherwise decrement the
/// value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand))
break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
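// Worked example of the wrap-around semantics above (illustrative): with
// operand == 3 and *addr starting at 2, successive calls return 2, 1, 0, 3
// and leave *addr at 1, 0, 3, 2 respectively -- the stored value decrements
// until it reaches 0 (or exceeds the threshold), then resets to operand.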
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand, otherwise set the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0))
break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand, otherwise set the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int
atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr equals \p expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr equals \p expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
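// Example (illustrative sketch): a minimal spin-lock acquire built on the
// pointer overload, where `lock` is assumed to be an int* in global memory
// (0 = free, 1 = held). Note that spinning inside a kernel relies on
// forward-progress guarantees that not all devices provide.
//
//   while (dpct::atomic_compare_exchange_strong(lock, 0, 1) != 0) {
//     // another work-item holds the lock; retry
//   }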
/// Atomic extension to implement standard APIs in std::atomic
namespace detail{
template <typename T> struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic{
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
/// Constructor with initialize value.
  constexpr atomic(T d) noexcept : __d(d) {}
/// atomically replaces the value of the referenced object with a non-atomic argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
  /// atomically compares the value of the referenced object with a non-atomic argument
  /// and performs an atomic exchange if equal or an atomic load if not
  /// \param expected The value expected to be found in the object referenced by the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as expected
  /// \param success The memory ordering for the read-modify-write operation
  /// \param failure The memory ordering for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope);
}
  /// atomically compares the value of the referenced object with a non-atomic argument
  /// and performs an atomic exchange if equal or an atomic load if not
  /// \param expected The value expected to be found in the object referenced by the atomic_ref object
  /// \param desired The value to store in the referenced object if it is as expected
  /// \param success The memory ordering for the read-modify-write operation
  /// \param failure The memory ordering for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
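// Example (editor's illustration, not part of the upstream header; the
// enclosing template and its value constructor are assumed to be
// dpct::atomic<T>, a name inferred from the __DPCT_ATOMIC_HPP__ header
// guard):
//   dpct::atomic<int> a(0);
//   int prev = a.fetch_add(5);              // prev == 0, a holds 5
//   int expected = 5;
//   bool ok = a.compare_exchange_strong(expected, 7);
//   // ok == true, a.load() == 7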
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1>,
/// oneapi::mkl::rng::device::mrg32k3a<4>,
/// oneapi::mkl::rng::device::philox4x32x10<1>,
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t> class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \tparam distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
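// Example (editor's illustration, not part of the upstream header): a
// device-side generator seeded once, then asked for two standard-normal
// floats. The seed and skip-ahead values are arbitrary.
//   dpct::rng::device::rng_generator<
//       oneapi::mkl::rng::device::philox4x32x10<1>>
//       gen(1234 /*seed*/, {0} /*num_to_skip*/);
//   sycl::vec<float, 2> g =
//       gen.generate<oneapi::mkl::rng::device::gaussian<float>, 2>();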
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
  /// Skip ahead over the given number of random numbers.
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
  /// Skip ahead over the given number of random numbers.
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  // Without Intel oneMKL the remaining engines are unavailable; throw from a
  // default label so the error is reachable, rather than leaving dead code
  // after the mrg32k3a return above and falling off the end of the switch.
  default:
    throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
                             "Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
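// Example (editor's illustration, not part of the upstream header; the
// sycl::malloc_shared allocation is an assumption about how the caller
// provides output memory):
//   dpct::rng::host_rng_ptr gen = dpct::rng::create_host_rng(
//       dpct::rng::random_engine_type::philox4x32x10);
//   gen->set_seed(1234);
//   float *out = sycl::malloc_shared<float>(1024, dpct::get_default_queue());
//   gen->generate_uniform(out, 1024);  // 1024 uniform floats in [0, 1)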
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
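// Example (editor's illustration, not part of the upstream header):
//   std::vector<float> a{1, 2, 3}, b{4, 5, 6};
//   float dot = dpct::inner_product(oneapi::dpl::execution::seq,
//                                   a.begin(), a.end(), b.begin(), 0.0f);
//   // dot == 1*4 + 2*5 + 3*6 == 32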
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using
// make_transform_output_iterator(). Used to apply the supplied transform
// function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function
// may be applied on write, resulting in a transform_output_iterator
template <typename _UnaryFunc> struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T> auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp> class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
  // There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
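// Example (editor's illustration, not part of the upstream header):
//   auto c = dpct::make_constant_iterator(42);
//   // *c == 42, c[100] == 42, (c + 5) - c == 5; a range of identical
//   // values with no storage behind it.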
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp> class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp> struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp>
operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T> struct __zip_iterator_impl;
template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept std::tuple type as template argument for
// compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to
// pass iterator's types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
  // signal to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator
operator+(difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
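// Example (editor's illustration, not part of the upstream header):
//   int data[] = {7, 8, 9};
//   dpct::arg_index_input_iterator<int *> it(data);
//   auto kv = *(it + 1);
//   // kv.key == 1 (the index), kv.value == 8 (the element)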
template <typename IterT> struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
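// Example (editor's illustration, not part of the upstream header; in and
// out are caller-provided pointers): a simple double buffer for ping-pong
// style algorithms.
//   dpct::io_iterator_pair<int *> bufs(in, out);
//   // read from bufs.first(), write to bufs.second(), then:
//   bufs.swap();  // the previous output becomes the next input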
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value));
}
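// Example (editor's illustration, not part of the upstream header): replace
// the elements whose corresponding mask entry satisfies the predicate.
//   std::vector<int> v{1, 2, 3, 4}, mask{0, 1, 0, 1};
//   dpct::replace_if(oneapi::dpl::execution::seq, v.begin(), v.end(),
//                    mask.begin(), [](int m) { return m == 1; }, -1);
//   // v == {1, -1, 3, -1}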
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
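// Example (editor's illustration, not part of the upstream header): collapse
// consecutive equal keys, keeping the first value of each run.
//   std::vector<int> keys{1, 1, 2, 2, 3}, vals{10, 20, 30, 40, 50};
//   auto ends = dpct::unique(oneapi::dpl::execution::seq, keys.begin(),
//                            keys.end(), vals.begin());
//   // keys now starts {1, 2, 3}, vals starts {10, 30, 50};
//   // ends.first / ends.second point past the last kept pair.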
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
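// Example (editor's illustration, not part of the upstream header): gather
// reads input elements through an index map (out[i] = input[map[i]]);
// scatter is the inverse (result[map[i]] = first[i]).
//   int input[] = {10, 20, 30, 40};
//   int map[]   = {3, 0, 2};
//   int out[3];
//   dpct::gather(oneapi::dpl::execution::seq, map, map + 3, input, out);
//   // out == {40, 10, 30}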
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; },
[=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
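// Example (editor's illustration, not part of the upstream header): merge
// two sorted key ranges, carrying the paired values along.
//   int ka[] = {1, 3}, kb[] = {2, 4};
//   int va[] = {10, 30}, vb[] = {20, 40};
//   int kr[4], vr[4];
//   dpct::merge(oneapi::dpl::execution::seq, ka, ka + 2, kb, kb + 2,
//               va, vb, kr, vr);
//   // kr == {1, 2, 3, 4}, vr == {10, 20, 30, 40}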
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
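// Example (editor's illustration, not part of the upstream header): the
// (init, step) overload writes init + i * step to position i.
//   std::vector<int> v(5);
//   dpct::iota(oneapi::dpl::execution::seq, v.begin(), v.end(), 100, 10);
//   // v == {100, 110, 120, 130, 140}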
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
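// Example (editor's illustration, not part of the upstream header): sort by
// key, reordering the values to match their keys.
//   std::vector<int> keys{3, 1, 2}, vals{30, 10, 20};
//   dpct::sort(oneapi::dpl::execution::seq, keys.begin(), keys.end(),
//              vals.begin());
//   // keys == {1, 2, 3}, vals == {10, 20, 30}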
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1>
partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms key to a specific bit range and sorts the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents oneDPL from selecting its radix-sort path, which
  // costs some performance. It is necessary here, however, to apply the
  // key transformation to the desired bit range before comparing.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A handwritten descending comparator (rather than std::greater())
      // preserves the relative order of -0.0 and 0.0, at the cost of some
      // performance because the radix-sort path is not used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A handwritten ascending comparator (rather than std::less())
      // preserves the relative order of -0.0 and 0.0, at the cost of some
      // performance because the radix-sort path is not used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms key from a pair to a specific bit range and sorts the pairs by the
// transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort path from being selected, which
  // costs some performance. It is necessary here, however, both to apply
  // the key transformation to the desired bit range and to select the key
  // from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort path from being selected, which
  // costs some performance. It is necessary here, however, to select the
  // key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer whose contents are discarded. The
  // memory footprint could be reduced by a specialized output iterator that
  // writes every key to a single unchanging dummy element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void
mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n, int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment is larger than a work-group, let the whole
    // work-group cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment is larger than half a sub-group, let a
    // sub-group cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment is small compared to a sub-group, let a single
    // work-item mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
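// Worked example (illustrative, not part of the original header): with
// n = 6, nsegments = 2, begin_offsets = {0, 4} and end_offsets = {3, 6},
// every strategy above writes segments = {0, 0, 0, ?, 1, 1}; index 3 lies
// in no segment and is left unwritten.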
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by key, keeping track of which segment each element was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Stable-sort by segment id; stability keeps the keys ordered
  // within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
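// Worked trace (illustrative, assuming the second pass is stable as
// intended): keys = {3, 1, 2} with segments = {0, 0, 1}. Pass 1 sorts by
// key: keys_temp = {1, 2, 3}, segments_sorted = {0, 1, 0}. Pass 2 sorts by
// segment id, carrying the keys along: keys_out = {1, 3, 2}, i.e. segment 0
// becomes {1, 3} and segment 1 stays {2}, each segment sorted in place.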
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by key, keeping track of which segment each element was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Stable-sort by segment id; stability keeps the keys ordered
  // within each segment.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
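// Illustrative usage sketch (assumes `keys` and `values` are
// dpct::io_iterator_pair double buffers already populated on the device).
// With do_swap_iters = true the pair is flipped afterwards so that first()
// refers to the sorted output:
//
//   dpct::sort_pairs(oneapi::dpl::execution::dpcpp_default, keys, values,
//                    n, /*descending=*/false, /*do_swap_iters=*/true);
//   auto sorted_keys = keys.first();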
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
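// Illustrative usage sketch (hypothetical data): sorting 32-bit keys by
// their low byte only. Because the transformed comparator is not plain
// std::less(), the radix-sort fast path is bypassed, as noted in the
// internal helpers above:
//
//   dpct::sort_keys(oneapi::dpl::execution::dpcpp_default, keys_in,
//                   keys_out, n, /*descending=*/false,
//                   /*begin_bit=*/0, /*end_bit=*/8);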
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters)
keys.swap();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Dispatch heuristic: use a parallel_for of serial sorts when there are
  // enough segments to load-balance across the device's compute units
  // (scaled by sub-group size on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, to limit overhead
  {
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else // fallback: two full device-wide sorts handle the remaining cases
{
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
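// Illustrative usage sketch (hypothetical data), similar in spirit to CUB's
// segmented radix sort: begin/end offsets delimit each segment of the key
// range, and segments are sorted independently of one another:
//
//   int64_t n = 5, nseg = 2;
//   int offsets[] = {0, 3, 5};  // segment i is [offsets[i], offsets[i+1])
//   dpct::segmented_sort_keys(oneapi::dpl::execution::dpcpp_default,
//                             keys_in, keys_out, n, nseg,
//                             offsets, offsets + 1);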
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Dispatch heuristic: use a parallel_for of serial sorts when there are
  // enough segments to load-balance across the device's compute units
  // (scaled by sub-group size on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, to limit overhead
  {
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else // fallback: two full device-wide sorts handle the remaining cases
{
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
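// Illustrative example: both reductions write a single
// dpct::key_value_pair{index, value} through `output`. For
// input = {4, 9, 1, 9} and n = 4:
//   reduce_argmax -> output[0] = {1, 9}  (first maximum wins)
//   reduce_argmin -> output[0] = {2, 1}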
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1>
equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end,
const ValueLessComparable &value, StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable
{
using type = T;
};
template <>
struct make_allocatable<void>
{
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T> class device_pointer;
#endif
template <typename T> struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T> void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
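// Illustrative usage sketch: device_reference is a proxy through which a
// device_pointer dereferences like a raw pointer; swap() exchanges the
// referenced values, not the references themselves. Hypothetical names:
//
//   int a = 1, b = 2;
//   dpct::device_reference<int> ra(a), rb(b);
//   dpct::swap(ra, rb);  // now a == 2, b == 1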
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr).alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // buffer has no default ctor; we pass a zero range to create an empty buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T> class device_iterator;
template <typename ValueType, typename Derived> class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T> class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
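// Minimal usage sketch (assumes a USM build plus an in-scope sycl::queue `q`
// and element count `n`): device_iterator models a random-access iterator, so
// it can drive oneDPL algorithms directly.
//
//   int *data = sycl::malloc_shared<int>(n, q);
//   device_iterator<int> first(data, 0), last(data, n);
//   std::fill(oneapi::dpl::execution::make_device_policy(q), first, last, 7);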
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
template <typename T> void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T> device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
template <typename T> const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T> T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T> const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T> T &get_raw_reference(T &ref) {
return ref;
}
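// Allocation lifecycle sketch (illustrative; the element count is arbitrary):
//
//   auto p = malloc_device<float>(64); // device_pointer<float>
//   float *raw = get_raw_pointer(p);   // underlying pointer for raw access
//   free_device(p);                    // currently a no-op, kept for
//                                      // Thrust-style API parity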
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include "memory.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include "../device.hpp"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA> operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()), _size(other.size()),
_capacity(other.capacity()), _storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()),
_capacity(v.capacity()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void
assign(InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
// override (erase) subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
    // the element that followed the erased range now lives at first's index
    return begin() + first.get_idx();
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
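// Round-trip sketch (illustrative; relies on the default dpct queue):
//
//   std::vector<int> host{1, 2, 3, 4};
//   device_vector<int> dev(host.begin(), host.end());
//   dev.push_back(5);
//   std::vector<int> back = dev; // conversion operator copies back to host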
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA> operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void
assign(InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
// override (erase) subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
    // the element that followed the erased range now lives at first's index
    return begin() + first.get_idx();
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <sycl/sycl.hpp>
#include <stdexcept>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
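// Usage sketch (illustrative; `q` and `grp_size` are assumptions): a blocked
// exclusive prefix sum over four register values per work-item.
//
//   q.parallel_for(
//       sycl::nd_range<1>(grp_size, grp_size), [=](sycl::nd_item<1> item) {
//         int in[4] = {1, 1, 1, 1}, out[4];
//         dpct::group::exclusive_scan(item, in, out, 0, sycl::plus<int>());
//       });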
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the first i work-items, where item is the i-th
/// work-item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
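// Illustrative callback (a sketch, not upstream API): prefix_callback_op is
// handed the group-wide aggregate and returns the prefix that seeds the scan,
// e.g. a running offset maintained across groups.
//
//   struct seed_prefix {
//     int base; // running total from earlier groups (assumption)
//     int operator()(int group_aggregate) { return base; }
//   };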
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT> struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
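// Extracts a num_bits-wide bit field from source starting at bit_start (a
// software analogue of the PTX bfe instruction).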
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false> class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void
rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void
exclusive_downsweep(const Item &item, packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U> struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U> struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U> struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U> struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T> struct traits : base_traits<T, T> {};
template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <> struct traits<int> : base_traits<int, uint32_t> {};
template <> struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N> struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements the scatter-to-blocked exchange pattern used in the radix sort
/// algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD> class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void
scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void
sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0,
int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit)
break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
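// Usage sketch (illustrative; `cgh`, `group_size`, and KEYS_PER_ITEM are
// assumptions made for the example): sort integer keys held in registers,
// backing the sorter with group-local scratch memory.
//
//   using sorter = dpct::group::radix_sort<uint32_t, KEYS_PER_ITEM>;
//   sycl::local_accessor<uint8_t, 1> scratch(
//       sycl::range<1>(sorter::get_local_memory_size(group_size)), cgh);
//   // ... inside the kernel ...
//   uint32_t keys[KEYS_PER_ITEM];
//   sorter(&scratch[0]).sort(item, keys);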
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T
reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
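// Example (illustrative): sum four register values per work-item across the
// whole group.
//
//   int vals[4] = {1, 2, 3, 4};
//   int total = dpct::group::reduce(item, vals, sycl::plus<int>());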
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce number of work-items at the start of the sub-group
/// to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
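// Example (illustrative; `v` is an assumed per-lane value): reduce only the
// first 8 lanes of the sub-group; the remaining lanes contribute the known
// identity (0 for plus).
//
//   float s = dpct::group::reduce_over_partial_group(item, v, 8,
//                                                    sycl::plus<float>());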
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Pointer to the input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the work-items
/// of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
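// Hedged usage sketch (values are assumptions): a group-wide inclusive scan.
// With 4 work-items holding input = 1, 2, 3, 4 and sycl::plus<int>(), the
// per-item results are 1, 3, 6, 10, and group_aggregate == 10 on every
// work-item after the broadcast.
//
//   int group_aggregate;
//   int out = dpct::group::inclusive_scan(item, input, sycl::plus<int>(),
//                                         group_aggregate);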
} // namespace group
namespace device {
namespace detail {
template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access the device used for the reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the last
/// element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
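// Hedged usage sketch (pointer names and contents are assumptions): reducing
// two segments of a device array with work-groups of 128. With
// inputs = {1, 2, 3, 4, 5}, begin_offsets = {0, 3} and end_offsets = {3, 5},
// outputs becomes {6, 9}.
//
//   dpct::device::segmented_reduce<128>(q, inputs, outputs,
//                                       /*segment_count=*/2, begin_offsets,
//                                       end_offsets, sycl::plus<int>(), 0);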
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts> struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp> struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp> struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this experimental
/// feature supports user-defined reductions.
///
/// \param queue Command queue used to access the device used for the reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the last
/// element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/02_sycl_migrated_optimized/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T> struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T> struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp> class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with raw memory buffer,
// not an initialized array, because initialization/destruction
// would make the span be at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp> class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp> class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName> struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less> struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda:
template <typename Predicate> struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate> struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T> struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T> result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
//[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b)));
template <typename Predicate> struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T> result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate> struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<0>(t)))
get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<1>(t)))
get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T> void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t)))
get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// This following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to be fit
// into to the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N> struct uint_byte_map {};
template <> struct uint_byte_map<1> { using type = uint8_t; };
template <> struct uint_byte_map<2> { using type = uint16_t; };
template <> struct uint_byte_map<4> { using type = uint32_t; };
template <> struct uint_byte_map<8> { using type = uint64_t; };
template <typename T> struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT> class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
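// Illustrative note (an assumption, not from the original header):
// translate_key<float, uint8_t>(/*begin_bit=*/24, /*end_bit=*/32) maps a
// float key to its top radix byte after the order-preserving twiddle above,
// so negative keys precede positive ones and -0.0f produces the same byte as
// 0.0f, preserving stability between the two zeros.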
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
// remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer that receives the new timer, 0 if the
//! creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface the timer to return the average time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
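////////////////////////////////////////////////////////////////////////////////
//! Hedged usage sketch (illustrative only, not part of the original header):
//! the typical create/start/stop/query/delete timer flow.
//!
//!   StopWatchInterface *timer = NULL;
//!   sdkCreateTimer(&timer);
//!   sdkStartTimer(&timer);
//!   /* ... work to be timed ... */
//!   sdkStopTimer(&timer);
//!   printf("elapsed: %f ms (avg %f ms)\n", sdkGetTimerValue(&timer),
//!          sdkGetAverageTimerValue(&timer));
//!   sdkDeleteTimer(&timer);
////////////////////////////////////////////////////////////////////////////////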
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// This template function parses a command-line argument value into type T
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
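//////////////////////////////////////////////////////////////////////////////
//! Hedged usage sketch (the command line shown is an assumption): parsing
//! flags and values with the helpers above.
//!
//!   // ./app --n=256 --scale=1.5 --file=data.txt --verbose
//!   bool verbose = checkCmdLineFlag(argc, argv, "verbose");      // true
//!   int n = getCmdLineArgumentInt(argc, argv, "n");              // 256
//!   float s = getCmdLineArgumentFloat(argc, argv, "scale");      // 1.5f
//!   char *name = NULL;
//!   getCmdLineArgumentString(argc, argv, "file", &name);         // "data.txt"
//!   char *path = sdkFindFilePath(name, argv[0]);  // heap-allocated, or NULL
//!   if (path) { /* use path */ free(path); }
//////////////////////////////////////////////////////////////////////////////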
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from type T to unsigned char / unsigned byte
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char / unsigned byte to unsigned char
//! (identity)
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a passthrough)
  //! @return converted value
  //! @param val value to convert
  unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from float to unsigned char / unsigned byte
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
// the correct size is checked / set in loadPGMc()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
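//////////////////////////////////////////////////////////////////////////////
//! Hedged usage sketch (file names are assumptions): round-tripping a
//! grayscale PGM with the loaders/savers above.
//!
//!   unsigned char *img = NULL;
//!   unsigned int w = 0, h = 0;
//!   if (sdkLoadPGM("input.pgm", &img, &w, &h)) {
//!     /* ... process the w*h grayscale bytes in img ... */
//!     sdkSavePGM("output.pgm", img, w, h);
//!     free(img);
//!   }
//////////////////////////////////////////////////////////////////////////////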
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
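  // NOTE: the "%f" conversion below assumes T is float; other element
  // types would require a matching format specifier.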
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
  // open file for writing
  // NOTE: the 'append' flag is currently ignored; the file is always opened
  // in write/append mode (the overwrite-only branch is disabled).
  std::fstream fh(filename, std::fstream::out | std::fstream::ate);
  if (verbose) {
    std::cerr << "sdkWriteFile() : Open file " << filename
              << " for write/append." << std::endl;
  }
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference timer_interface to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
  if (threshold == 0.0f) {
    return result;
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return (len * threshold > error_count);
  }
}
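// Worked example of the threshold semantics above: with len = 1000 and
// threshold = 0.01f, the check (len * threshold > error_count) passes for
// up to 9 mismatching elements; with threshold = 0.0f a single mismatch
// fails the comparison.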
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold threshold % of (# of bytes) for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
  if (threshold == 0.0f) {
    if (error_count) {
      printf("total # of errors = %d\n", error_count);
    }
    return (error_count == 0);
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return (len * threshold > error_count);
  }
}
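// Note on the epsilon floor above: because max_error = MAX(epsilon,
// __MIN_EPSILON_ERROR), calling this with epsilon = 0 still tolerates
// absolute differences smaller than 1e-3f per element.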
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
  FILE *fp;
  if (FOPEN_FAIL(FOPEN(fp, filename, "wb"))) {
    printf("sdkDumpBin: unable to open <%s> for writing\n", filename);
    return;
  }
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
  return (error_count == 0);  // true only if all elements passed the comparison
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
  return (error_count == 0);  // true only if all elements passed the comparison
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
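// sdkCompareL2fe computes the relative L2 error
//   e = ||reference - data||_2 / ||reference||_2
// and passes when e < epsilon; a (near-)zero reference norm is rejected
// outright.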
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
                << "," << src_height << ") vs (" << ref_width << ","
                << ref_height << ")\n";
    }
    // comparing differently sized images would read out of bounds
    return false;
  }
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
  return (error_count == 0);
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
                << "," << src_height << ") vs (" << ref_width << ","
                << ref_height << ")\n";
    }
    // comparing differently sized images would read out of bounds
    return false;
  }
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
  return (error_count == 0);
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavyweight, but exceptions are not meant for
  // performance-critical / release builds
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
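// Usage sketch (illustrative only; the file name below is hypothetical):
//
//   try {
//     FILE *fp = fopen("config.txt", "r");
//     if (fp == NULL) {
//       RUNTIME_EXCEPTION("config.txt does not exist");
//     }
//   } catch (const std::runtime_error &ex) {
//     handleException(ex);  // prints ex.what() and exits
//   }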
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note, it is required that your SDK sample to include the proper header
// files, please refer the CUDA examples for examples of the needed CUDA
// headers, which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:1: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
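  // Intentionally empty: after DPCT migration SYCL reports errors via
  // exceptions, so there is no CUDA error code left to inspect here.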
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:2: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but does not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:4: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
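// Example of the rounding behavior above (half away from zero):
//   ftoi(2.5f) == 3, ftoi(2.4f) == 2, ftoi(-2.5f) == -3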
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct dpct_type_175225 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the value, we default to using the previous
  // (last known) entry so the sample can still run
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_319217 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the value, we default to using the previous
  // (last known) entry so the sample can still run
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
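// Example: the SM version is encoded as (major << 4) + minor, so a compute
// capability of 8.6 maps to 0x86, i.e. 128 cores/SM on an "Ampere" device
// according to the tables above.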
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:6: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:7: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:8: The "devID" device may be not the one intended for use. Adjust the
selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:11: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:12: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:14: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_common.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUASIRANDOMGENERATOR_COMMON_H
#define QUASIRANDOMGENERATOR_COMMON_H
////////////////////////////////////////////////////////////////////////////////
// Global types and constants
////////////////////////////////////////////////////////////////////////////////
typedef long long int INT64;
#define QRNG_DIMENSIONS 2
#define QRNG_RESOLUTION 31
#define INT_SCALE (1.0f / (float)0x80000001U)
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_gold.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <math.h>
#include "quasirandomGenerator_common.h"
////////////////////////////////////////////////////////////////////////////////
// Table generation functions
////////////////////////////////////////////////////////////////////////////////
// Internal 64(63)-bit table
static INT64 cjn[63][QRNG_DIMENSIONS];
static int GeneratePolynomials(int buffer[QRNG_DIMENSIONS], bool primitive) {
int i, j, n, p1, p2, l;
int e_p1, e_p2, e_b;
// generate all polynomials to buffer
for (n = 1, buffer[0] = 0x2, p2 = 0, l = 0; n < QRNG_DIMENSIONS; ++n) {
// search for the next irreducible polynomial
for (p1 = buffer[n - 1] + 1;; ++p1) {
// find degree of polynomial p1
for (e_p1 = 30; (p1 & (1 << e_p1)) == 0; --e_p1) {
}
// try to divide p1 by all polynomials in buffer
for (i = 0; i < n; ++i) {
// find the degree of buffer[i]
for (e_b = e_p1; (buffer[i] & (1 << e_b)) == 0; --e_b) {
}
// divide p2 by buffer[i] until the end
for (p2 = (buffer[i] << ((e_p2 = e_p1) - e_b)) ^ p1; p2 >= buffer[i];
p2 = (buffer[i] << (e_p2 - e_b)) ^ p2) {
for (; (p2 & (1 << e_p2)) == 0; --e_p2) {
}
} // compute new degree of p2
// division without remainder!!! p1 is not irreducible
if (p2 == 0) {
break;
}
}
// all divisions were with remainder - p1 is irreducible
if (p2 != 0) {
e_p2 = 0;
if (primitive) {
        // check that p1 has only one maximal cycle (i.e. is primitive)
j = ~(0xffffffff << (e_p1 + 1));
e_b = (1 << e_p1) | 0x1;
for (p2 = e_b, e_p2 = (1 << e_p1) - 2; e_p2 > 0; --e_p2) {
p2 <<= 1;
i = p2 & p1;
i = (i & 0x55555555) + ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
i = (i & 0x07070707) + ((i >> 4) & 0x07070707);
p2 |= (i % 255) & 1;
if ((p2 & j) == e_b) break;
}
}
      // the polynomial passed all checks - add it to the list of polynomials
if (e_p2 == 0) {
buffer[n] = p1;
l += e_p1;
break;
}
}
}
}
return l + 1;
}
////////////////////////////////////////////////////////////////////////////////
// @misc{Bratley92:LDS,
// author = "B. Fox and P. Bratley and H. Niederreiter",
// title = "Implementation and test of low discrepancy sequences",
// text = "B. L. Fox, P. Bratley, and H. Niederreiter. Implementation and
// test of
// low discrepancy sequences. ACM Trans. Model. Comput. Simul.,
// 2(3):195--213,
// July 1992.",
// year = "1992" }
////////////////////////////////////////////////////////////////////////////////
static void GenerateCJ() {
int buffer[QRNG_DIMENSIONS];
int *polynomials;
int n, p1, l, e_p1;
  // Niederreiter (in contrast to Sobol) allows the use of polynomials that
  // are merely irreducible, not necessarily primitive
l = GeneratePolynomials(buffer, false);
// convert all polynomials from buffer to polynomials table
polynomials = new int[l + 2 * QRNG_DIMENSIONS + 1];
for (n = 0, l = 0; n < QRNG_DIMENSIONS; ++n) {
// find degree of polynomial p1
for (p1 = buffer[n], e_p1 = 30; (p1 & (1 << e_p1)) == 0; --e_p1) {
}
// fill polynomials table with values for this polynomial
polynomials[l++] = 1;
for (--e_p1; e_p1 >= 0; --e_p1) {
polynomials[l++] = (p1 >> e_p1) & 1;
}
polynomials[l++] = -1;
}
polynomials[l] = -1;
// irreducible polynomial p
int *p = polynomials, e, d;
// polynomial b
int b_arr[1024], *b, m;
// v array
int v_arr[1024], *v;
// temporary polynomial, required to do multiplication of p and b
int t_arr[1024], *t;
// subsidiary variables
int i, j, u, m1, ip, it;
// cycle over monic irreducible polynomials
for (d = 0; p[0] != -1; p += e + 2) {
// allocate memory for cj array for dimension (ip + 1)
for (i = 0; i < 63; ++i) {
cjn[i][d] = 0;
}
// determine the power of irreducible polynomial
for (e = 0; p[e + 1] != -1; ++e) {
}
// polynomial b in the beginning is just '1'
(b = b_arr + 1023)[m = 0] = 1;
// v array needs only (63 + e - 2) length
v = v_arr + 1023 - (63 + e - 2);
// cycle over all coefficients
for (j = 63 - 1, u = e; j >= 0; --j, ++u) {
if (u == e) {
u = 0;
// multiply b by p (polynomials multiplication)
for (i = 0, t = t_arr + 1023 - (m1 = m); i <= m; ++i) {
t[i] = b[i];
}
b = b_arr + 1023 - (m += e);
for (i = 0; i <= m; ++i) {
b[i] = 0;
for (ip = e - (m - i), it = m1; ip <= e && it >= 0; ++ip, --it) {
if (ip >= 0) {
b[i] ^= p[ip] & t[it];
}
}
}
// multiplication of polynomials finished
// calculate v
for (i = 0; i < m1; ++i) {
v[i] = 0;
}
for (; i < m; ++i) {
v[i] = 1;
}
for (; i <= 63 + e - 2; ++i) {
v[i] = 0;
for (it = 1; it <= m; ++it) {
v[i] ^= v[i - it] & b[it];
}
}
}
// copy calculated v to cj
for (i = 0; i < 63; i++) {
cjn[i][d] |= (INT64)v[i + u] << j;
}
}
++d;
}
delete[] polynomials;
}
// Generate 63-bit quasirandom number for given index and dimension and
// normalize
extern "C" double getQuasirandomValue63(INT64 i, int dim) {
const double INT63_SCALE = (1.0 / (double)0x8000000000000001ULL);
INT64 result = 0;
for (int bit = 0; bit < 63; bit++, i >>= 1)
if (i & 1) result ^= cjn[bit][dim];
return (double)(result + 1) * INT63_SCALE;
}
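// Example (illustrative): once initQuasirandomGenerator() below has been
// called (it fills the cjn table via GenerateCJ), the first point of the
// sequence can be printed as:
//   for (int dim = 0; dim < QRNG_DIMENSIONS; dim++)
//     printf("%f ", getQuasirandomValue63(1, dim));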
////////////////////////////////////////////////////////////////////////////////
// Initialization (table setup)
////////////////////////////////////////////////////////////////////////////////
extern "C" void initQuasirandomGenerator(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION]) {
GenerateCJ();
for (int dim = 0; dim < QRNG_DIMENSIONS; dim++)
for (int bit = 0; bit < QRNG_RESOLUTION; bit++)
table[dim][bit] = (int)((cjn[bit][dim] >> 32) & 0x7FFFFFFF);
}
////////////////////////////////////////////////////////////////////////////////
// Generate 31-bit quasirandom number for given index and dimension
////////////////////////////////////////////////////////////////////////////////
extern "C" float getQuasirandomValue(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION], int i, int dim) {
int result = 0;
for (int bit = 0; bit < QRNG_RESOLUTION; bit++, i >>= 1)
if (i & 1) result ^= table[dim][bit];
return (float)(result + 1) * INT_SCALE;
}
////////////////////////////////////////////////////////////////////////////////
// Moro's Inverse Cumulative Normal Distribution function approximation
////////////////////////////////////////////////////////////////////////////////
extern "C" double MoroInvCNDcpu(unsigned int x) {
const double a1 = 2.50662823884;
const double a2 = -18.61500062529;
const double a3 = 41.39119773534;
const double a4 = -25.44106049637;
const double b1 = -8.4735109309;
const double b2 = 23.08336743743;
const double b3 = -21.06224101826;
const double b4 = 3.13082909833;
const double c1 = 0.337475482272615;
const double c2 = 0.976169019091719;
const double c3 = 0.160797971491821;
const double c4 = 2.76438810333863E-02;
const double c5 = 3.8405729373609E-03;
const double c6 = 3.951896511919E-04;
const double c7 = 3.21767881768E-05;
const double c8 = 2.888167364E-07;
const double c9 = 3.960315187E-07;
double z;
bool negate = false;
// Ensure the conversion to floating point will give a value in the
// range (0,0.5] by restricting the input to the bottom half of the
// input domain. We will later reflect the result if the input was
// originally in the top half of the input domain
if (x >= 0x80000000UL) {
x = 0xffffffffUL - x;
negate = true;
}
// x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff])
// Convert to floating point in (0,0.5]
const double x1 = 1.0 / static_cast<double>(0xffffffffUL);
const double x2 = x1 / 2.0;
double p1 = x * x1 + x2;
// Convert to floating point in (-0.5,0]
double p2 = p1 - 0.5;
// The input to the Moro inversion is p2 which is in the range
// (-0.5,0]. This means that our output will be the negative side
// of the bell curve (which we will reflect if "negate" is true).
// Main body of the bell curve for |p| < 0.42
if (p2 > -0.42) {
z = p2 * p2;
z = p2 * (((a4 * z + a3) * z + a2) * z + a1) /
((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0);
}
// Special case (Chebychev) for tail
else {
z = log(-log(p1));
z = -(c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z *
(c7 + z * (c8 + z * c9))))))));
}
// If the original input (x) was in the top half of the range, reflect
// to get the positive side of the bell curve
return negate ? -z : z;
}
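// Example (illustrative): the midpoint of the input domain maps to the median
// of the standard normal distribution, so MoroInvCNDcpu(0x80000000u) is
// approximately 0.0, while inputs near the ends of the domain map far into
// the tails of the bell curve.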
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// CUDA Runtime
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
// Utilities and system includes
#include <helper_functions.h>
#include <helper_cuda.h>
#include "quasirandomGenerator_common.h"
#include <cmath>
////////////////////////////////////////////////////////////////////////////////
// CPU code
////////////////////////////////////////////////////////////////////////////////
extern "C" void initQuasirandomGenerator(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION]);
extern "C" float getQuasirandomValue(
unsigned int table[QRNG_DIMENSIONS][QRNG_RESOLUTION], int i, int dim);
extern "C" double getQuasirandomValue63(INT64 i, int dim);
extern "C" double MoroInvCNDcpu(unsigned int p);
////////////////////////////////////////////////////////////////////////////////
// GPU code
////////////////////////////////////////////////////////////////////////////////
extern "C" void initTableGPU(
unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION]);
extern "C" void quasirandomGeneratorGPU(float *d_Output, unsigned int seed,
unsigned int N);
extern "C" void inverseCNDgpu(float *d_Output, unsigned int *d_Input,
unsigned int N);
const int N = 1048576;
int main(int argc, char **argv) {
// Start logs
printf("%s Starting...\n\n", argv[0]);
unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION];
float *h_OutputGPU, *d_Output;
int dim, pos;
double delta, ref, sumDelta, sumRef, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
if (sizeof(INT64) != 8) {
printf("sizeof(INT64) != 8\n");
return 0;
}
sdkCreateTimer(&hTimer);
printf("Allocating GPU memory...\n");
checkCudaErrors(
DPCT_CHECK_ERROR(d_Output = sycl::malloc_device<float>(
QRNG_DIMENSIONS * N, dpct::get_default_queue())));
printf("Allocating CPU memory...\n");
h_OutputGPU = (float *)malloc(QRNG_DIMENSIONS * N * sizeof(float));
printf("Initializing QRNG tables...\n\n");
initQuasirandomGenerator(tableCPU);
initTableGPU(tableCPU);
printf("Testing QRNG...\n\n");
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue()
.memset(d_Output, 0, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
int numIterations = 20;
for (int i = -1; i < numIterations; i++) {
if (i == 0) {
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
}
quasirandomGeneratorGPU(d_Output, 0, N);
}
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()));
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / (double)numIterations * 1e-3;
printf(
"quasirandomGenerator, Throughput = %.4f GNumbers/s, Time = %.5f s, Size "
"= %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
(double)QRNG_DIMENSIONS * (double)N * 1.0E-9 / gpuTime, gpuTime,
QRNG_DIMENSIONS * N, 1, 128 * QRNG_DIMENSIONS);
printf("\nReading GPU results...\n");
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue()
.memcpy(h_OutputGPU, d_Output, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
printf("Comparing to the CPU results...\n\n");
sumDelta = 0;
sumRef = 0;
for (dim = 0; dim < QRNG_DIMENSIONS; dim++)
for (pos = 0; pos < N; pos++) {
ref = getQuasirandomValue63(pos, dim);
delta = (double)h_OutputGPU[dim * N + pos] - ref;
sumDelta += fabs(delta);
sumRef += fabs(ref);
}
printf("L1 norm: %E\n", sumDelta / sumRef);
printf("\nTesting inverseCNDgpu()...\n\n");
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue()
.memset(d_Output, 0, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
for (int i = -1; i < numIterations; i++) {
if (i == 0) {
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
}
inverseCNDgpu(d_Output, NULL, QRNG_DIMENSIONS * N);
}
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()));
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / (double)numIterations * 1e-3;
printf(
"quasirandomGenerator-inverse, Throughput = %.4f GNumbers/s, Time = %.5f "
"s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
(double)QRNG_DIMENSIONS * (double)N * 1E-9 / gpuTime, gpuTime,
QRNG_DIMENSIONS * N, 1, 128);
printf("Reading GPU results...\n");
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue()
.memcpy(h_OutputGPU, d_Output, QRNG_DIMENSIONS * N * sizeof(float))
.wait()));
printf("\nComparing to the CPU results...\n");
sumDelta = 0;
sumRef = 0;
unsigned int distance = ((unsigned int)-1) / (QRNG_DIMENSIONS * N + 1);
for (pos = 0; pos < QRNG_DIMENSIONS * N; pos++) {
unsigned int d = (pos + 1) * distance;
ref = MoroInvCNDcpu(d);
delta = (double)h_OutputGPU[pos] - ref;
sumDelta += fabs(delta);
sumRef += fabs(ref);
}
printf("L1 norm: %E\n\n", L1norm = sumDelta / sumRef);
printf("Shutting down...\n");
sdkDeleteTimer(&hTimer);
free(h_OutputGPU);
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Output, dpct::get_default_queue())));
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/quasirandomGenerator/quasirandomGenerator_kernel.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUASIRANDOMGENERATOR_KERNEL_CUH
#define QUASIRANDOMGENERATOR_KERNEL_CUH
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include "quasirandomGenerator_common.h"
// Fast integer multiplication
/*
DPCT1064:15: Migrated __umul24 call is used in a macro/template definition and
may not be valid for all macro/template uses. Adjust the code.
*/
#define MUL(a, b) sycl::mul24((unsigned int)a, (unsigned int)b)
////////////////////////////////////////////////////////////////////////////////
// Niederreiter quasirandom number generation kernel
////////////////////////////////////////////////////////////////////////////////
static dpct::constant_memory<unsigned int, 2> c_Table(QRNG_DIMENSIONS,
QRNG_RESOLUTION);
static void quasirandomGeneratorKernel(float *d_Output,
unsigned int seed,
unsigned int N,
const sycl::nd_item<3> &item_ct1,
dpct::accessor<unsigned int, dpct::constant, 2> c_Table) {
unsigned int *dimBase = &c_Table[item_ct1.get_local_id(1)][0];
unsigned int tid = MUL(item_ct1.get_local_range(2), item_ct1.get_group(2)) +
item_ct1.get_local_id(2);
unsigned int threadN =
MUL(item_ct1.get_local_range(2), item_ct1.get_group_range(2));
for (unsigned int pos = tid; pos < N; pos += threadN) {
unsigned int result = 0;
unsigned int data = seed + pos;
for (int bit = 0; bit < QRNG_RESOLUTION; bit++, data >>= 1)
if (data & 1) {
result ^= dimBase[bit];
}
d_Output[MUL(item_ct1.get_local_id(1), N) + pos] =
(float)(result + 1) * INT_SCALE;
}
}
// Table initialization routine
extern "C" void initTableGPU(
unsigned int tableCPU[QRNG_DIMENSIONS][QRNG_RESOLUTION]) {
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue()
.memcpy(c_Table.get_ptr(), tableCPU,
QRNG_DIMENSIONS * QRNG_RESOLUTION * sizeof(unsigned int))
.wait()));
}
// Host-side interface
extern "C" void quasirandomGeneratorGPU(float *d_Output, unsigned int seed,
unsigned int N) {
sycl::range<3> threads(1, QRNG_DIMENSIONS, 128);
/*
DPCT1049:0: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().submit([&](sycl::handler &cgh) {
c_Table.init();
auto c_Table_acc_ct1 = c_Table.get_access(cgh);
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 128) * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
quasirandomGeneratorKernel(d_Output, seed, N, item_ct1,
c_Table_acc_ct1);
});
});
getLastCudaError("quasirandomGeneratorKernel() execution failed.\n");
}
////////////////////////////////////////////////////////////////////////////////
// Moro's Inverse Cumulative Normal Distribution function approximation
////////////////////////////////////////////////////////////////////////////////
inline float MoroInvCNDgpu(unsigned int x) {
const float a1 = 2.50662823884f;
const float a2 = -18.61500062529f;
const float a3 = 41.39119773534f;
const float a4 = -25.44106049637f;
const float b1 = -8.4735109309f;
const float b2 = 23.08336743743f;
const float b3 = -21.06224101826f;
const float b4 = 3.13082909833f;
const float c1 = 0.337475482272615f;
const float c2 = 0.976169019091719f;
const float c3 = 0.160797971491821f;
const float c4 = 2.76438810333863E-02f;
const float c5 = 3.8405729373609E-03f;
const float c6 = 3.951896511919E-04f;
const float c7 = 3.21767881768E-05f;
const float c8 = 2.888167364E-07f;
const float c9 = 3.960315187E-07f;
float z;
bool negate = false;
// Ensure the conversion to floating point will give a value in the
// range (0,0.5] by restricting the input to the bottom half of the
// input domain. We will later reflect the result if the input was
// originally in the top half of the input domain
if (x >= 0x80000000UL) {
x = 0xffffffffUL - x;
negate = true;
}
// x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff])
// Convert to floating point in (0,0.5]
const float x1 = 1.0f / static_cast<float>(0xffffffffUL);
const float x2 = x1 / 2.0f;
float p1 = x * x1 + x2;
// Convert to floating point in (-0.5,0]
float p2 = p1 - 0.5f;
// The input to the Moro inversion is p2 which is in the range
// (-0.5,0]. This means that our output will be the negative side
// of the bell curve (which we will reflect if "negate" is true).
// Main body of the bell curve for |p| < 0.42
if (p2 > -0.42f) {
z = p2 * p2;
z = p2 * (((a4 * z + a3) * z + a2) * z + a1) /
((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0f);
}
// Special case (Chebychev) for tail
else {
z = sycl::log(-sycl::log(p1));
z = -(c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z
* (c8 + z * c9))))))));
}
// If the original input (x) was in the top half of the range, reflect
// to get the positive side of the bell curve
return negate ? -z : z;
}
////////////////////////////////////////////////////////////////////////////////
// Main kernel. Choose between transforming
// input sequence and uniform ascending (0, 1) sequence
////////////////////////////////////////////////////////////////////////////////
static void inverseCNDKernel(float *d_Output, unsigned int *d_Input,
unsigned int pathN,
const sycl::nd_item<3> &item_ct1) {
unsigned int distance = ((unsigned int)-1) / (pathN + 1);
unsigned int tid = MUL(item_ct1.get_local_range(2), item_ct1.get_group(2)) +
item_ct1.get_local_id(2);
unsigned int threadN =
MUL(item_ct1.get_local_range(2), item_ct1.get_group_range(2));
// Transform input number sequence if it's supplied
if (d_Input) {
for (unsigned int pos = tid; pos < pathN; pos += threadN) {
unsigned int d = d_Input[pos];
d_Output[pos] = (float)MoroInvCNDgpu(d);
}
}
// Else generate input uniformly placed samples on the fly
// and write to destination
else {
for (unsigned int pos = tid; pos < pathN; pos += threadN) {
unsigned int d = (pos + 1) * distance;
d_Output[pos] = (float)MoroInvCNDgpu(d);
}
}
}
extern "C" void inverseCNDgpu(float *d_Output, unsigned int *d_Input,
unsigned int N) {
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 128) * sycl::range<3>(1, 1, 128),
sycl::range<3>(1, 1, 128)),
[=](sycl::nd_item<3> item_ct1) {
inverseCNDKernel(d_Output, d_Input, N, item_ct1);
});
getLastCudaError("inverseCNDKernel() execution failed.\n");
}
#endif
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/ccl.hpp>
#include <unordered_map>
#include <memory>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &
get_kvs(const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Helper class to init the ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get stored kvs with \p addr if it exists. Otherwise, create kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs>
create_kvs(const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr)
ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() {
delete _ccl_stream_ptr;
};
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const {
return _comm.rank();
}
/// Retrieves the number of ranks in the oneapi::ccl::communicator
/// \returns The number of ranks
int size() const {
return _comm.size();
}
/// Return underlying native device, which was used in oneapi::ccl::communicator
sycl::device get_device() const {
return _comm.get_device().get_native();
}
/// \brief allreduce is a collective communication operation that performs the global reduction operation
/// on values from all ranks of communicator and distributes the result back to all ranks.
/// \param sendbuff the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recvbuff [out] the buffer to store the reduced result, must have the same dimension as @c sendbuff
/// \param count the number of elements of type @c dtype in @c sendbuff and @c recvbuff
/// \param dtype the datatype of elements in @c sendbuff and @c recvbuff
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the communicator
/// and returns the result to the root rank.
/// \param sendbuff the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recvbuff [out] the buffer to store the reduced result,
/// must have the same dimension as @c sendbuff
/// \param count the number of elements of type @c dtype in @c sendbuff and @c recvbuff
/// \param dtype the datatype of elements in @c sendbuff and @c recvbuff
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts data
/// from one rank of communicator (denoted as root) to all other ranks.
/// Only in-place operation is supported.
/// \param sendbuff the buffer with @c count elements of @c dtype that stores
/// the data to be broadcast
/// \param recvbuff [out] the buffer to store the received data; must be the
/// same pointer as @c sendbuff
/// \param count the number of elements of type @c dtype in @c sendbuff
/// \param dtype the datatype of elements in @c sendbuff
/// \param root the rank that broadcasts @c sendbuff
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that performs the global reduction operation
/// on values from all ranks of the communicator and scatters the result in blocks back to all ranks.
/// \param sendbuff the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recvbuff [out] the buffer to store the reduced result; each rank receives one block of the result
/// \param recv_count the number of elements of type @c dtype in the receive block
/// \param dtype the datatype of elements in @c sendbuff and @c recvbuff
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if(!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr),
_imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh)
{ cgh.host_task([=]
{
_imp->_ccl_event_impl.wait();
delete _imp; }); });
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
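/// Example usage (illustrative sketch; `world_size`, `my_rank` and the queue
/// `q` are assumed to be provided by the caller, and the kvs address created
/// on rank 0 must be distributed to the other ranks out of band):
///   auto addr = dpct::ccl::create_kvs_address();
///   dpct::ccl::communicator_wrapper comm(world_size, my_rank, addr);
///   comm.allreduce(send_ptr, recv_ptr, count, oneapi::ccl::datatype::float32,
///                  oneapi::ccl::reduction::sum, &q);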
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/MapReduce/guided_quasirandomGenerator_SYCLMigration/01_dpct_output/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T> class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints> struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T> struct DataType { using T2 = T; };
template <typename T> struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction).wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer points to the destination location.
/// \param [in] from_ptr A pointer points to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32)
return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
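/// Illustrative round-trip example for the two casts above (values assumed):
///   double d = 3.14;
///   int hi = dpct::cast_double_to_int(d);         // high 32 bits
///   int lo = dpct::cast_double_to_int(d, false);  // low 32 bits
///   double r = dpct::cast_ints_to_double(hi, lo); // r == d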
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T> inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a)
return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
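/// Illustrative examples (values assumed, not from the original source):
///   reverse_bits<uint8_t>(0b10110000) == 0b00001101
///   reverse_bits<uint32_t>(0x1u) == 0x80000000u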
/// \param [in] a The first value contains 4 bytes
/// \param [in] b The second value contains 4 bytes
/// \param [in] s The selector value; only the lower 16 bits are used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
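/// Illustrative example: with a = 0x03020100 and b = 0x07060504, each nibble
/// of the selector picks one source byte (0-3 from a, 4-7 from b), so
///   byte_level_permute(0x03020100, 0x07060504, 0x3210) == 0x03020100  // a
///   byte_level_permute(0x03020100, 0x07060504, 0x7654) == 0x07060504  // b
///   byte_level_permute(0x03020100, 0x07060504, 0x0123) == 0x00010203  // a reversed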
/// Find position of first least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T> inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
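/// Illustrative examples:
///   ffs(0) == 0  // no bit set
///   ffs(1) == 1  // bit 0 is the first set bit
///   ffs(8) == 4  // 8 == 0b1000, so the first set bit is at position 4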
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in logical sub_group gets value from another work-item
/// whose id is \p remote_local_id. If \p remote_local_id is outside the
/// logical sub_group id range, \p remote_local_id will modulo with \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
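/// Illustrative example (assuming a sub-group `sg` of size 32 and a per-item
/// value `x`): with logical_sub_group_size == 8, work-items 0..7 form the
/// first logical sub-group, and every member of it receives the value held by
/// work-item 3 of that group:
///   T v = dpct::select_from_sub_group(sg, x, 3, 8);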
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is caller's id adds \p delta. If calculated id is outside the logical
/// sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical_sub_group gets value from another work-item whose
/// id is caller's id subtracts \p delta. If calculated id is outside the
/// logical sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is bitwise exclusive OR of the caller's id and \p mask. If calculated id
/// is outside the logical sub_group id range, the work-item will get value from
/// itself. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
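/// Illustrative example: a butterfly reduction over a sub-group `sg` of size
/// 32, assuming a per-work-item value `v`; after the loop every work-item
/// holds the sub-group-wide sum:
///   for (int mask = 16; mask > 0; mask >>= 1)
///     v += dpct::permute_sub_group_by_xor(sg, v, mask);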
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: if the n-th bit is set to 1, the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask,
sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: if the n-th bit is set to 1, the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left not "
"supported on host device and non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: if the n-th bit is set to 1, the work-item with id n
/// participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right not "
"supported on host device and non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked
/// sub-group operation. The parameter member_mask indicates the work-items
/// participating in the call: if the n-th bit is set to 1, the work-item with
/// id n participates in the call. All work-items named in member_mask must be
/// executed with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor not "
"supported on host device and non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: Represents a work group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void
nd_range_barrier(const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: Represents a work group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void
nd_range_barrier(const sycl::nd_item<1> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
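/// Illustrative usage sketch for nd_range_barrier: `d_counter` is assumed to
/// be a zero-initialized `unsigned int *` allocated with sycl::malloc_device;
/// inside the kernel:
///   sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
///                    sycl::memory_scope::device,
///                    sycl::access::address_space::global_space>
///       counter(*d_counter);
///   dpct::experimental::nd_range_barrier(item, counter);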
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
/// Returns the number of logical-group in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
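/// Illustrative example: split a work-group into logical tiles of 16
/// work-items (assuming `item` is the current sycl::nd_item<3>):
///   dpct::experimental::logical_group tile(item, item.get_group(), 16);
///   uint32_t lane = tile.get_local_linear_id();   // 0..15 within the tile
///   uint32_t tile_id = tile.get_group_linear_id();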
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the max active
/// work-group number per Xe-Core. Ref to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether barrier is used.
/// \param [in] used_large_grf Whether large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size will
/// be used instead of \p wg_size and returns -1.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf)
num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
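/// Illustrative example: query how many 256-work-item work-groups that use
/// 4 KB of SLM and a barrier can be active per Xe-Core:
///   int num_wg = 0;
///   dpct::experimental::calculate_max_active_wg_per_xecore(
///       &num_wg, 256, 4 * 1024, 32, /*used_barrier=*/true);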
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ?
&get_default_queue()
: reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding implicitly passed parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2*,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params,
typename R, typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type = std::tuple_element_t<account_for_default_params<i>(),
std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i-1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra)
return nullptr;
for (; (std::size_t) *extra != 0; ++extra) {
if ((std::size_t) *extra == 1) {
return static_cast<char*>(*(extra+1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments
/// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params),
args_buffer(get_args_buffer(extra))
{}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i>*>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>());
}
}
};
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |